diff --git a/Makefile b/Makefile
index ff333b7..e2fdc16 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,9 @@
 # --- Global Variables ---
+# Set Pulumi Stack name to a valid Pulumi ORG/PROJECT/STACK name
+PULUMI_STACK := $(or ${PULUMI_STACK},"usrbinkat/devcontainer/kubernetes")
+# --- GitHub Actions ---
 GITHUB_REPOSITORY_STRING := $(shell echo ${GITHUB_REPOSITORY} | tr '[:upper:]' '[:lower:]')
 DEVCONTAINER_NAME ?= $(if ${GITHUB_REPOSITORY_STRING},${GITHUB_REPOSITORY_STRING},pulumi/devcontainer)
-PULUMI_STACK := echo ${GITHUB_REPOSITORY} | awk -F '[/]' '{print $2}'
 DOCKER_IMAGE_NAME := "ghcr.io/${DEVCONTAINER_NAME}:latest"
 
 # --- Help ---
@@ -41,10 +43,10 @@ esc: login
 
 # Deploy Pulumi infrastructure
 up:
-	@echo "Deploying Pulumi infrastructure..."
-	pulumi stack select codespace || pulumi stack init codespace && pulumi stack select codespace
+	@echo "Deploying Pulumi infrastructure... $(PULUMI_STACK)"
+	pulumi stack select --create $(PULUMI_STACK)
 	pulumi install
-	pulumi up --stack codespace --yes --skip-preview
+	pulumi up --yes --skip-preview --stack ${PULUMI_STACK}
 	sleep 10
 	kubectl get po -A
 	@echo "Deployment complete."
@@ -52,18 +54,18 @@ up:
 # Destroy Pulumi infrastructure
 down:
 	@echo "Destroying Pulumi infrastructure..."
-	pulumi down --stack codespace --yes --skip-preview
+	pulumi down --yes --skip-preview --stack ${PULUMI_STACK} 2>/dev/null || true
 	@echo "Infrastructure teardown complete."
 
 # --- Kind ---
 kind:
 	@echo "Creating Kind Cluster..."
 	direnv allow
-	docker volume create cilium-worker-n01
-	docker volume create cilium-worker-n02
-	docker volume create cilium-control-plane-n01
-	kind create cluster --config hack/kind.yaml
-	kind get kubeconfig --name cilium | tee .kube/config >/dev/null
+	docker volume create pulumi-worker-n01
+	docker volume create pulumi-worker-n02
+	docker volume create pulumi-control-plane-n01
+	kind create cluster --retain --config hack/kind.yaml
+	kind get kubeconfig --name pulumi | tee .kube/config >/dev/null
 	sleep 5
 	kubectl get po -A
 	@echo "Kind Cluster Created."
@@ -87,15 +89,16 @@ build-push:
 # --- Cleanup ---
 clean: down
 	@echo "Cleaning up..."
-	kind delete cluster --name cilium || true
+	kind delete cluster --name pulumi || true
 	rm -rf .kube/config || true
 	@echo "Cleanup complete."
 
 clean-all: clean
-	docker volume rm cilium-worker-n01 || true
-	docker volume rm cilium-worker-n02 || true
-	docker volume rm cilium-control-plane-n01 || true
-	@echo "Extended cleanup complete."
+	@echo "Deep clean in progress..."
+	docker volume rm pulumi-worker-n01 || true
+	docker volume rm pulumi-worker-n02 || true
+	docker volume rm pulumi-control-plane-n01 || true
+	@echo "Deep cleanup complete."
 
 # --- GitHub Actions ---
 act:
diff --git a/hack/kind.yaml b/hack/kind.yaml
new file mode 100644
index 0000000..9057a52
--- /dev/null
+++ b/hack/kind.yaml
@@ -0,0 +1,48 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: pulumi
+networking:
+  apiServerPort: 6443
+  apiServerAddress: "0.0.0.0"
+  serviceSubnet: "10.12.0.0/16"
+  podSubnet: "10.242.0.0/16"
+  ######################################################################
+  # The following is required for Cilium to work with kind
+  # Uncomment the next 2 lines to replace kindnet with the Cilium CNI
+  # disableDefaultCNI: true # disable kindnet
+  # kubeProxyMode: none # disable kube-proxy
+nodes:
+- role: control-plane
+  image: kindest/node:v1.29.0
+  extraMounts: # Persistent image cache
+  - hostPath: /var/lib/docker/volumes/pulumi-control-plane-n01/_data
+    containerPath: /var/lib/containerd
+- role: worker
+  image: kindest/node:v1.29.0
+  extraMounts: # Persistent image cache
+  - hostPath: /var/lib/docker/volumes/pulumi-worker-n01/_data
+    containerPath: /var/lib/containerd
+- role: worker
+  image: kindest/node:v1.29.0
+  extraMounts: # Persistent image cache
+  - hostPath: /var/lib/docker/volumes/pulumi-worker-n02/_data
+    containerPath: /var/lib/containerd
+  # Expose ports on the host:
+  # - 80/tcp
+  # - 443/tcp
+  extraPortMappings:
+  - listenAddress: "0.0.0.0"
+    protocol: TCP
+    hostPort: 80
+    containerPort: 80
+  - listenAddress: "0.0.0.0"
+    protocol: TCP
+    hostPort: 443
+    containerPort: 443
+  kubeadmConfigPatches:
+  - |
+    # Label the worker node listening on service ports
+    kind: JoinConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "ingress-ready=true"
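
For reference, a minimal usage sketch of the updated targets (shell). It assumes the Pulumi CLI is already authenticated and that PULUMI_STACK is either left at the default usrbinkat/devcontainer/kubernetes or overridden with a stack you own; the myorg/devcontainer/dev name below is purely illustrative.

    # Optionally override the default ORG/PROJECT/STACK before invoking make;
    # the Makefile falls back to usrbinkat/devcontainer/kubernetes otherwise.
    export PULUMI_STACK=myorg/devcontainer/dev   # hypothetical stack name

    # Create the kind cluster (and the pulumi-* image-cache volumes), then deploy the stack
    make kind up

    # Destroy the stack, delete the kind cluster, and remove the pulumi-* volumes
    make clean-all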