Skip to content

Commit

Permalink
WIP: tests: add test for bare-metal with ipv6
Browse files Browse the repository at this point in the history
IPv6 brings some new complexities, particularly around IPAM.
  • Loading branch information
justinsb committed Nov 12, 2024
1 parent 80806a8 commit 0acc406
Show file tree
Hide file tree
Showing 4 changed files with 363 additions and 27 deletions.
26 changes: 26 additions & 0 deletions .github/workflows/e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,29 @@ jobs:
with:
name: tests-e2e-scenarios-bare-metal
path: /tmp/artifacts/

# E2E bare-metal scenario with IPv6 networking (IPAM-focused variant of
# tests-e2e-scenarios-bare-metal above).
tests-e2e-scenarios-bare-metal-ipv6:
  runs-on: ubuntu-24.04
  # Job-level ceiling; the test step itself is capped at 60m below,
  # leaving headroom for checkout/setup/artifact upload.
  timeout-minutes: 70
  steps:
    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      with:
        # Checkout into the GOPATH layout the kops build expects.
        path: ${{ env.GOPATH }}/src/k8s.io/kops

    - name: Set up go
      uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed
      with:
        go-version-file: '${{ env.GOPATH }}/src/k8s.io/kops/go.mod'

    # Step name matches the script actually invoked (the sibling job runs
    # run-test; this job runs the IPv6 scenario).
    - name: tests/e2e/scenarios/bare-metal/scenario-ipv6
      working-directory: ${{ env.GOPATH }}/src/k8s.io/kops
      run: |
        timeout 60m tests/e2e/scenarios/bare-metal/scenario-ipv6
      env:
        ARTIFACTS: /tmp/artifacts

    - name: Archive production artifacts
      # Upload artifacts even when the test step fails or times out.
      if: always()
      # NOTE(review): tag-pinned while the other actions in this workflow are
      # SHA-pinned — consider pinning to a commit SHA for consistency.
      uses: actions/upload-artifact@v4
      with:
        name: tests-e2e-scenarios-bare-metal-ipv6
        path: /tmp/artifacts/
2 changes: 2 additions & 0 deletions tests/e2e/scenarios/bare-metal/cleanup
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ sudo ip link del dev tap-vm0 || true
sudo ip link del dev tap-vm1 || true
sudo ip link del dev tap-vm2 || true

sudo ip link del dev br0 || true

rm -rf .build/vm0
rm -rf .build/vm1
rm -rf .build/vm2
Expand Down
64 changes: 37 additions & 27 deletions tests/e2e/scenarios/bare-metal/run-test
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,16 @@ fi
rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

IPV4_PREFIX=10.123.45.

VM0_IP=${IPV4_PREFIX}10
VM1_IP=${IPV4_PREFIX}11
VM2_IP=${IPV4_PREFIX}12

# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
eval $(ssh-agent)
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519

# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

Expand All @@ -55,13 +65,14 @@ echo "Waiting 10 seconds for VMs to start"
sleep 10

# Remove from known-hosts in case of reuse
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.11 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.12 || true
ssh-keygen -f ~/.ssh/known_hosts -R ${VM0_IP} || true
ssh-keygen -f ~/.ssh/known_hosts -R ${VM1_IP} || true
ssh-keygen -f ~/.ssh/known_hosts -R ${VM2_IP} || true

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] uptime
# Check SSH is working and accept the keys
ssh -o StrictHostKeyChecking=accept-new root@${VM0_IP} uptime
ssh -o StrictHostKeyChecking=accept-new root@${VM1_IP} uptime
ssh -o StrictHostKeyChecking=accept-new root@${VM2_IP} uptime

cd ${REPO_ROOT}

Expand Down Expand Up @@ -91,7 +102,7 @@ ${KOPS} create cluster --cloud=metal metal.k8s.local --zones main --networking c

# Set the IP ingress, required for metal cloud
# TODO: is this the best option?
${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=10.123.45.10
${KOPS} edit cluster metal.k8s.local --set spec.api.publicName=${VM0_IP}

# Use latest etcd-manager image (while we're adding features)
${KOPS} edit cluster metal.k8s.local --set 'spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'
Expand All @@ -112,32 +123,31 @@ ${KOPS} get ig --name metal.k8s.local -oyaml
${KOPS} update cluster metal.k8s.local
${KOPS} update cluster metal.k8s.local --yes --admin

# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
eval $(ssh-agent)
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519

# Enroll the control-plane VM
${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host 10.123.45.10 --v=2
${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group control-plane-main --host ${VM0_IP} --v=2

# Manual creation of "volumes" for etcd, and setting up peer nodes
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] tee -a /etc/hosts
cat <<EOF | ssh root@${VM0_IP} tee -a /etc/hosts
# Hosts added for etcd discovery
10.123.45.10 node0.main.metal.k8s.local
10.123.45.10 node0.events.metal.k8s.local
${VM0_IP} node0.main.metal.k8s.local
${VM0_IP} node0.events.metal.k8s.local
EOF

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] cat /etc/hosts
ssh root@${VM0_IP} cat /etc/hosts

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster
ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--main--0/mnt
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--main--0/mnt/please-create-new-cluster

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 [email protected] touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster
ssh root@${VM0_IP} mkdir -p /mnt/disks/metal.k8s.local--events--0/mnt
ssh root@${VM0_IP} touch /mnt/disks/metal.k8s.local--events--0/mnt/please-create-new-cluster


echo "Waiting 300 seconds for kube to start"
sleep 300
echo "Waiting for kube to start"
until kubectl get nodes; do
echo "waiting for kube to start"
sleep 10
done

kubectl get nodes
kubectl get pods -A
Expand Down Expand Up @@ -196,18 +206,18 @@ function enroll_node() {

# Manual "discovery" for control-plane endpoints
# TODO: Replace with well-known IP
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts
cat <<EOF | ssh root@${node_ip} tee -a /etc/hosts
# Hosts added for leader discovery
10.123.45.10 kops-controller.internal.metal.k8s.local
10.123.45.10 api.internal.metal.k8s.local
${VM0_IP} kops-controller.internal.metal.k8s.local
${VM0_IP} api.internal.metal.k8s.local
EOF

timeout 10m ${KOPS} toolbox enroll --cluster metal.k8s.local --instance-group nodes-main --host ${node_ip} --v=2
}

enroll_node 10.123.45.11
enroll_node 10.123.45.12
enroll_node ${VM1_IP}
enroll_node ${VM2_IP}

echo "Waiting 30 seconds for nodes to be ready"
sleep 30
Expand Down
Loading

0 comments on commit 0acc406

Please sign in to comment.