diff --git a/.ansible-lint b/.ansible-lint
index e1ade69..69f59f3 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -2,3 +2,5 @@
profile: production
exclude_paths:
- roles/**/tests/test.yml
+ - .github
+ - .vscode
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index c2d5d33..9060fb9 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -1,5 +1,4 @@
# These github actions will perform linting using pre-commit.
-# yamllint disable rule:line-length
# spell-checker: disable
---
name: pre-commit
@@ -21,14 +20,25 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: install talisman
+ # yamllint disable rule:line-length
run: |
sudo curl -sLo /usr/local/bin/talisman https://github.com/thoughtworks/talisman/releases/download/v1.32.0/talisman_linux_amd64
sudo chmod 0755 /usr/local/bin/talisman
+ # yamllint enable rule:line-length
- name: Install terraform-docs
+ # yamllint disable rule:line-length
run: |
sudo sh -c 'curl -sL https://github.com/terraform-docs/terraform-docs/releases/download/v0.18.0/terraform-docs-v0.18.0-linux-amd64.tar.gz | tar xzf - -C /usr/local/bin'
sudo chmod 0755 /usr/local/bin/terraform-docs
+ # yamllint enable rule:line-length
- uses: actions/setup-python@v5
with:
python-version: '3.12'
+ cache: pip
+ cache-dependency-path: |
+ requirements*.txt
+ - name: install dependencies
+ run: |
+ pip install -r requirements-dev.txt
+ ansible-galaxy collection install --requirements-file ansible-requirements.yaml
- uses: pre-commit/action@v3.0.1
diff --git a/.gitignore b/.gitignore
index 5249315..d846361 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,8 +28,9 @@ kubeconfig*
!ansible/templates/kubernetes/kubeconfig.user.j2
# Ignore VS Code files, except settings.json
-.vscode
+.vscode/*
!.vscode/settings.json
+!.vscode/cSpell.json
# Ignore any pytest caches
.pytest_cache
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 593f144..0168b28 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,6 +14,10 @@ repos:
rev: v24.9.0
hooks:
- id: ansible-lint
+ # Use the full ansible package, not just ansible-core
+ # https://ansible.readthedocs.io/projects/lint/configuring/#pre-commit-setup
+ additional_dependencies:
+ - ansible
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.95.0
hooks:
diff --git a/.talismanrc b/.talismanrc
index cc946cd..ef355e3 100644
--- a/.talismanrc
+++ b/.talismanrc
@@ -60,7 +60,7 @@ fileignoreconfig:
- filename: roles/shared/tasks/main.yml
checksum: 7fdf44dc8bfabe5839cc4dae3c014e946aff65208280235c04a43db099f8d62b
- filename: roles/vault/defaults/main.yml
- checksum: 2232da9d2fea90e539c0a88742afd740729143d88ac63837affe79b0a060217c
+ checksum: c7d3e5475ec20225a16b17b0f3f01bd3e64f5b27161c6f3decce42bf5cda8020
- filename: roles/vault/tasks/main.yml
checksum: 9db6fa1ea15d1f1221ebecfc536be9657289a85216dce43690bf7ef1a8ee323b
- filename: vault/kickstart-secrets.tf
@@ -70,7 +70,7 @@ fileignoreconfig:
- filename: roles/crio/tasks/ubuntu.yml
checksum: 17977771e05f783f83df3d2e71bd86c6f92be65c02a361072fd98a9041218f62
- filename: roles/crio/tasks/debian.yml
- checksum: 7a55c5565f4ed159e6004f2851804c578aa23b081cc53e9a806e7a816e624d73
+ checksum: 1d556ce69804f0fe827b6547c8adce0f706a6275cd1df87567b19129fa7c1197
- filename: roles/shared/templates/scripts/post-onboard-debian.sh.j2
checksum: b9109b9587d4a02f83de60c1aea350afd12cd6608b080de5f0807ca7ee23aecf
- filename: certs/Makefile
@@ -89,3 +89,5 @@ fileignoreconfig:
checksum: bb7e71a39b91430ce89dce56bcaf94d6d615e151e10b747fb63d5edfa5fdebfe
- filename: kubernetes.yml
checksum: db1c3f78b9659e1d5cbed0397fa02c45142966a8d986a3ae6936b76e01471eee
+- filename: roles/external_secrets/defaults/main.yml
+ checksum: b7cf61e4d734f478ac52808f66f5e256232ecacd65684819101d17de1159208e
diff --git a/.vscode/cSpell.json b/.vscode/cSpell.json
new file mode 100644
index 0000000..7dc3d31
--- /dev/null
+++ b/.vscode/cSpell.json
@@ -0,0 +1,115 @@
+{
+ "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json",
+ "version": "0.2",
+ "language": "en",
+ "words": [
+ "acceleratedgcp",
+ "apiserver",
+ "arpa",
+ "autoclean",
+ "Autodetection",
+ "autoprovision",
+ "autoremove",
+ "bionic",
+ "bpffs",
+ "calicoctl",
+ "centos",
+ "chmod",
+ "CIFS",
+ "creds",
+ "cri-o",
+ "crio",
+ "Customise",
+ "datacenter",
+ "Dataplane",
+ "dearmor",
+ "devel",
+ "dgst",
+ "dpkg",
+ "dvswitch",
+ "elrepo",
+ "endfor",
+ "esxi",
+ "fileignoreconfig",
+ "firewalld",
+ "fstype",
+ "gpgcheck",
+ "gpgkey",
+ "hashi",
+ "hashicorp",
+ "hostvars",
+ "hugepages",
+ "ifname",
+ "ignoreerrors",
+ "ipaddr",
+ "ipmi",
+ "ippool",
+ "iscsi",
+ "iscsid",
+ "isdir",
+ "kube",
+ "kubeadm",
+ "kubeconfig",
+ "kubectl",
+ "kubelet",
+ "kubernetes",
+ "kubic",
+ "libcontainers",
+ "lineinfile",
+ "loadbalancer",
+ "markdownlint",
+ "metacopy",
+ "modinfo",
+ "mountopt",
+ "netboot",
+ "netfilter",
+ "netplan",
+ "nmcli",
+ "nobest",
+ "nodev",
+ "noexec",
+ "noqa",
+ "nosuid",
+ "outform",
+ "paramiko",
+ "posix",
+ "preseed",
+ "projectcalico",
+ "proto",
+ "pubin",
+ "pubkey",
+ "pxelinux",
+ "rbac",
+ "realpath",
+ "redhat",
+ "relatime",
+ "repos",
+ "rhel",
+ "rpcbind",
+ "runc",
+ "sbin",
+ "selectattr",
+ "selinux",
+ "smbd",
+ "storageclass",
+ "swapfs",
+ "swapoff",
+ "syslinux",
+ "tcpport",
+ "testinfra",
+ "tftpd",
+ "Tigera",
+ "udpport",
+ "vcenter",
+ "vcsa",
+ "vesamenu",
+ "vmgmt",
+ "vmware",
+ "vnet",
+ "vsphere",
+ "wontfix"
+ ],
+ "flagWords": [
+ "hte"
+ ]
+}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 5dbbad3..1b9142c 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,115 +1,6 @@
{
- "ansible.ansible.path": ".direnv/python-3.11.3/bin/ansible",
- "ansible.python.interpreterPath": ".direnv/python-3.11.3/bin/python3",
- "ansible.validation.lint.path": ".direnv/python-3.11.3/bin/ansible-lint",
- "cSpell.words": [
- "acceleratedgcp",
- "apiserver",
- "arpa",
- "autoclean",
- "Autodetection",
- "autoprovision",
- "autoremove",
- "bionic",
- "bpffs",
- "calicoctl",
- "centos",
- "chmod",
- "CIFS",
- "creds",
- "cri-o",
- "crio",
- "Customise",
- "datacenter",
- "Dataplane",
- "dearmor",
- "devel",
- "dgst",
- "dpkg",
- "dvswitch",
- "elrepo",
- "endfor",
- "esxi",
- "fileignoreconfig",
- "firewalld",
- "fstype",
- "gpgcheck",
- "gpgkey",
- "hashi",
- "hashicorp",
- "hostvars",
- "hugepages",
- "ifname",
- "ignoreerrors",
- "ipaddr",
- "ipmi",
- "ippool",
- "iscsi",
- "iscsid",
- "isdir",
- "kube",
- "kubeadm",
- "kubeconfig",
- "kubectl",
- "kubelet",
- "kubernetes",
- "kubic",
- "libcontainers",
- "lineinfile",
- "loadbalancer",
- "markdownlint",
- "metacopy",
- "modinfo",
- "mountopt",
- "netboot",
- "netfilter",
- "netplan",
- "nmcli",
- "nobest",
- "nodev",
- "noexec",
- "noqa",
- "nosuid",
- "outform",
- "paramiko",
- "posix",
- "preseed",
- "projectcalico",
- "proto",
- "pubin",
- "pubkey",
- "pxelinux",
- "rbac",
- "realpath",
- "redhat",
- "relatime",
- "repos",
- "rhel",
- "rpcbind",
- "runc",
- "sbin",
- "selectattr",
- "selinux",
- "smbd",
- "storageclass",
- "swapfs",
- "swapoff",
- "syslinux",
- "tcpport",
- "testinfra",
- "tftpd",
- "Tigera",
- "udpport",
- "vcenter",
- "vcsa",
- "vesamenu",
- "vmgmt",
- "vmware",
- "vnet",
- "vsphere",
- "wontfix"
- ],
"yaml.schemas": {
- "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible-lint-config.json": "/.ansible-lint"
- }
+ "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible-lint-config.json": ".ansible-lint"
+ },
+ "editor.rulers": [120]
}
diff --git a/.yamllint.yml b/.yamllint.yml
new file mode 100644
index 0000000..f05a7bc
--- /dev/null
+++ b/.yamllint.yml
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+ line-length:
+ max: 120
+ level: warning
+ # Force yamllint rules to match ansible-lint requirements
+ braces:
+ max-spaces-inside: 1
+ comments:
+ min-spaces-from-content: 1
+ comments-indentation: disable
+ octal-values:
+ forbid-explicit-octal: true
+ forbid-implicit-octal: true
diff --git a/ansible-requirements.yaml b/ansible-requirements.yaml
index 88af2e6..8990ba2 100644
--- a/ansible-requirements.yaml
+++ b/ansible-requirements.yaml
@@ -2,14 +2,14 @@
---
collections:
- name: ansible.posix
- version: 1.5.1
+ version: 1.5.4
- name: ansible.utils
- version: 2.9.0
+ version: 4.1.0
- name: community.crypto
- version: 2.11.1
+ version: 2.22.0
- name: community.general
- version: 6.5.0
+ version: 9.4.0
- name: community.hashi_vault
- version: 4.2.0
+ version: 6.2.0
- name: kubernetes.core
- version: 2.4.0
+ version: 3.2.0
diff --git a/ansible/calico.yml b/ansible/calico.yml
deleted file mode 100644
index cfa4bff..0000000
--- a/ansible/calico.yml
+++ /dev/null
@@ -1,169 +0,0 @@
-# Install Calico as CNI
-# yamllint disable rule:truthy rule:line-length
----
-- name: Prepare all hosts for kubernetes
- hosts: servers
- become: yes
- tasks:
- - name: Check for NetworkManager conf.d
- ansible.builtin.stat:
- path: /etc/NetworkManager/conf.d
- register: nm_conf_d
- tags:
- - base
- - name: Add NetworkManager override for Calico
- ansible.builtin.template:
- src: templates/kubernetes/nm-calico.conf.j2
- dest: /etc/NetworkManager/conf.d/calico.conf
- owner: root
- group: root
- mode: 0644
- when: nm_conf_d.stat.isdir is defined and nm_conf_d.stat.isdir
- tags:
- - base
- - name: Reload NetworkManager configuration
- ansible.builtin.command: nmcli connection reload
- when: nm_conf_d.stat.isdir is defined and nm_conf_d.stat.isdir
- tags:
- - base
- - name: Install calicoctl
- ansible.builtin.shell: |
- curl -sLo /run/calicoctl https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64 && chmod 0755 /run/calicoctl && mv /run/calicoctl /usr/local/bin/calicoctl
- args:
- creates: /usr/local/bin/calicoctl
- when: k8s_master | default(False)
- tags:
- - base
-- name: Install Calico
- hosts: localhost
- become: no
- gather_facts: no
- vars:
- bgp_peer: "{{ hostvars[groups['servers'][0]]['vlan100_addresses'] | ansible.utils.ipv4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address') | first }}"
- tasks:
- - name: Create tigera-operator namespace
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: tigera-operator
- tags:
- - init
- - name: Configure Calico to replace kube-proxy
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: kubernetes-services-endpoint
- namespace: tigera-operator
- data:
- KUBERNETES_SERVICE_HOST: "{{ kubernetes_api_address }}"
- KUBERNETES_SERVICE_PORT: "{{ kubernetes_api_port | default(6443) }}"
- tags:
- - init
- - name: Install Tigera operator
- ansible.builtin.command: kubectl apply -f https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml
- tags:
- - init
- - name: Wait for Tigera operator to be ready
- kubernetes.core.k8s_info:
- kind: Deployment
- namespace: tigera-operator
- name: tigera-operator
- register: operator_ready
- until: operator_ready.resources | json_query('[*].status.conditions[?reason==`MinimumReplicasAvailable`][].status') | select('match','True') | list | length >= 1
- delay: 10
- retries: 6
- tags:
- - init
- - name: Customise Calico installation
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
- name: default
- spec:
- calicoNetwork:
- linuxDataplane: BPF
- bgp: Enabled
- ipPools:
- - cidr: "{{ kubernetes_pod_cidr | default('10.0.0.0/16') }}"
- encapsulation: None
- natOutgoing: Enabled
- nodeSelector: all()
- blockSize: 24
- nodeAddressAutodetectionV4:
- canReach: "{{ bgp_peer }}"
- tags:
- - config
- - name: Enable Calico API server
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: operator.tigera.io/v1
- kind: APIServer
- metadata:
- name: default
- spec: {}
- tags:
- - config
- - name: Wait for Calico to be ready
- kubernetes.core.k8s_info:
- kind: TigeraStatus
- name: calico
- register: calico_ready
- until: calico_ready.resources | json_query('[*].status.conditions[?type==`Available`][].status') | select('match','True') | list | length >= 1
- delay: 10
- retries: 12
- tags:
- - config
- - name: Ensure Calico API server is ready
- kubernetes.core.k8s_info:
- kind: TigeraStatus
- name: apiserver
- register: calico_ready
- until: calico_ready.resources | json_query('[*].status.conditions[?type==`Available`][].status') | select('match','True') | list | length >= 1
- delay: 5
- retries: 5
- tags:
- - config
- - name: Set Calico BGPPeer to upstream router
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: projectcalico.org/v3
- kind: BGPPeer
- metadata:
- name: router-ipv4-peer
- spec:
- peerIP: "{{ bgp_peer }}"
- asNumber: "{{ bgp_peer_asn | default('64512') }}"
- tags:
- - bgp
- - name: Update Calico BGPConfiguration
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: projectcalico.org/v3
- kind: BGPConfiguration
- metadata:
- name: default
- spec:
- logSeverityScreen: Info
- nodeToNodeMeshEnabled: false
- asNumber: "{{ bgp_asn | default('64513') }}"
- listenPort: 179
- serviceClusterIPs:
- - cidr: "{{ kubernetes_service_cidr | default('10.1.0.0/22') }}"
- serviceExternalIPs:
- - cidr: "{{ kubernetes_external_cidr | default('10.2.0.0/24') }}"
- serviceLoadBalancerIPs:
- - cidr: "{{ kubernetes_lb_cidr | default('10.3.0.0/24') }}"
- tags:
- - bgp
diff --git a/ansible/cilium.yml b/ansible/cilium.yml
deleted file mode 100644
index f68d00a..0000000
--- a/ansible/cilium.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-# Install Cilium as CNI
-# yamllint disable rule:truthy rule:line-length
----
-- name: Gather facts
- hosts: servers
- become: yes
- gather_facts: yes
-- name: Install Cilium
- hosts: localhost
- become: no
- gather_facts: no
- vars:
- dataplane_device: 'bond0.100'
- bgp_peer: "{{ hostvars[groups['servers'][0]]['vlan100_addresses'] | ansible.utils.ipv4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address') | first }}"
- tasks:
- - name: Create BGP ConfigMap
- kubernetes.core.k8s:
- state: present
- definition:
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: bgp-config
- namespace: kube-system
- data:
- config.yaml: |
- peers:
- - peer-address: {{ bgp_peer }}
- peer-asn: {{ bgp_peer_asn | default('64512') }}
- my-asn: {{ bgp_asn | default('64513') }}
- address-pools:
- - name: default
- protocol: bgp
- addresses:
- - {{ kubernetes_lb_cidr | default('10.3.0.0/24') }}
- tags:
- - init
- - name: Add Cilium helm repo
- kubernetes.core.helm_repository:
- name: cilium
- repo_url: https://helm.cilium.io/
- tags:
- - init
- - name: Install Cilium from chart
- kubernetes.core.helm:
- name: cilium
- chart_ref: cilium/cilium
- chart_version: "{{ cilium_version }}"
- update_repo_cache: yes
- force: yes
- wait: yes
- release_namespace: kube-system
- values:
- bgp:
- enabled: true
- announce:
- loadbalancerIP: true
- podCIDR: true
- extraArgs:
- - "--devices={{ dataplane_device }}"
- ipam:
- mode: kubernetes
- loadbalancer:
- mode: dsr
- kubeProxyReplacement: strict
- k8s:
- requireIPv4PodCIDR: true
- k8sServiceHost: "{{ kubernetes_api_address }}"
- k8sServicePort: "{{ kubernetes_api_port | default(6443) }}"
- tunnel: disabled
- ipv4NativeRoutingCIDR: "{{ kubernetes_pod_cidr | default('10.0.0.0/16') }}"
- tags:
- - init
- - name: Wait for Cilium to be ready
- kubernetes.core.k8s_info:
- kind: DaemonSet
- name: cilium
- namespace: kube-system
- register: cilium_ready
- until: cilium_ready.resources | json_query('[0].status | numberReady == desiredNumberScheduled')
- delay: 10
- retries: 6
- tags:
- - init
diff --git a/ansible/kubernetes.yml b/ansible/kubernetes.yml
deleted file mode 100644
index f8b1483..0000000
--- a/ansible/kubernetes.yml
+++ /dev/null
@@ -1,365 +0,0 @@
-# Install kubernetes on hosts
-# yamllint disable rule:truthy rule:line-length
----
-- name: Prepare all hosts for kubernetes
- hosts: servers
- become: yes
- vars:
- required_modules:
- - br_netfilter
- k8s_sysctl:
- - param: net.bridge.bridge-nf-call-iptables
- value: 1
- - param: net.bridge.bridge-nf-call-ip6tables
- value: 1
- - param: net.ipv4.ip_forward
- value: 1
- - param: vm.nr_hugepages
- value: 2048
- tasks:
- - name: Configure persistent modules for Kubernetes
- vars:
- pb_name: kubernetes
- ansible.builtin.template:
- src: templates/servers/modules-load.conf.j2
- dest: "/etc/modules-load.d/kubernetes.conf"
- owner: root
- group: root
- mode: 0644
- tags:
- - prep
- - name: Add modules
- community.general.modprobe:
- name: "{{ item }}"
- state: present
- loop: "{{ required_modules }}"
- tags:
- - prep
- - name: Disable ufw
- community.general.ufw:
- state: disabled
- ignore_errors: yes
- when: ansible_distribution == "Ubuntu"
- tags:
- - prep
- - name: Disable firewalld
- ansible.builtin.systemd:
- name: firewalld
- enabled: no
- masked: yes
- state: stopped
- ignore_errors: yes
- when: ansible_os_family == "RedHat"
- tags:
- - prep
- - name: Set persistent sysctl params for Kubernetes
- ansible.posix.sysctl:
- name: "{{ item['param'] }}"
- value: "{{ item['value'] }}"
- sysctl_file: /etc/sysctl.d/90-kubernetes.conf
- sysctl_set: yes
- reload: yes
- state: present
- loop: "{{ k8s_sysctl }}"
- tags:
- - prep
- - name: disable swap
- ansible.builtin.command: swapoff -a
- tags:
- - base
- - name: remove swapfs
- ansible.posix.mount:
- path: none
- fstype: swap
- state: absent
- tags:
- - prep
- - name: mount bpffs on Ubuntu 18.04
- ansible.posix.mount:
- path: /sys/fs/bpf
- src: bpffs
- fstype: bpf
- opts: rw,nosuid,nodev,noexec,relatime,mode=700
- state: mounted
- when: ansible_distribution == "Ubuntu" and ansible_distribution_major_version == "18"
- tags:
- - prep
- - name: Disable SELinux on CentOS/RHEL
- ansible.posix.selinux:
- state: disabled
- when: ansible_os_family == "RedHat"
- notify:
- - reboot
- tags:
- - prep
- handlers:
- - name: reboot
- ansible.builtin.reboot:
- post_reboot_delay: 120
- reboot_timeout: 300
-
-- name: Install kubernetes
- hosts: servers
- become: yes
- tasks:
- - name: Add Kubernetes apt keys
- ansible.builtin.apt_key:
- url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
- state: present
- when: ansible_os_family == "Debian"
- tags:
- - base
- - name: Add Kubernetes apt repo
- ansible.builtin.apt_repository:
- repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
- state: present
- when: ansible_os_family == "Debian"
- tags:
- - base
- - name: Add Kubernetes yum repo
- ansible.builtin.yum_repository:
- name: kubernetes
- description: Kubernetes
- file: kubernetes
- baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
- enabled: yes
- gpgcheck: yes
- repo_gpgcheck: yes
- gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- when: ansible_os_family == "RedHat"
- tags:
- - base
- - name: Install kubectl, kubeadm, and kubelet for Debian/Ubuntu
- ansible.builtin.package:
- name:
- - "kubectl={{ kubernetes_version }}-00"
- - "kubelet={{ kubernetes_version }}-00"
- - "kubeadm={{ kubernetes_version }}-00"
- state: present
- when: ansible_os_family == "Debian"
- tags:
- - base
- - name: Hold kubectl, kubeadm, and kubelet on Debian/Ubuntu
- ansible.builtin.dpkg_selections:
- name: "{{ item }}"
- selection: hold
- loop:
- - kubectl
- - kubelet
- - kubeadm
- when: ansible_os_family == "Debian"
- tags:
- - base
- - name: Install kubectl, kubeadm, and kubelet for CentOS/RHEL
- ansible.builtin.package:
- name:
- - "kubectl-{{ kubernetes_version }}"
- - "kubelet-{{ kubernetes_version }}"
- - "kubeadm-{{ kubernetes_version }}"
- state: present
- when: ansible_os_family == "RedHat"
- tags:
- - base
- - name: Enable kubelet unit
- ansible.builtin.systemd:
- name: kubelet
- enabled: yes
- state: started
- daemon_reload: yes
- tags:
- - base
- - name: Create temp kubeadm config directory
- ansible.builtin.file:
- path: /var/tmp/kubernetes
- state: directory
- owner: root
- group: root
- mode: 0755
- when: (k8s_master | default(False)) or (k8s_worker | default(False))
- tags:
- - base
- - name: Generate kube-apiserver patch file
- vars:
- addresses: "{{ vlan100_addresses }}"
- ansible.builtin.template:
- src: templates/kubernetes/kube-apiserver0+json.yaml.j2
- dest: /var/tmp/kubernetes/kube-apiserver0+json.yaml
- owner: root
- group: root
- mode: 0644
- force: no
- when: (k8s_master | default(False)) or (k8s_worker | default(False))
- tags:
- - base
- - name: Generate kubeadm join token
- ansible.builtin.command: kubeadm token generate
- register: kubeadm_token_command
- run_once: yes
- tags:
- - init
- - name: Generate kubeadm cert key
- ansible.builtin.command: kubeadm certs certificate-key
- register: kubeadm_cert_key_command
- run_once: yes
- tags:
- - init
- - name: Generate kubeadm init configuration for first master
- vars:
- addresses: "{{ vlan100_addresses }}"
- kubeadm_token: "{{ kubeadm_token_command.stdout }}"
- kubeadm_cert_key: "{{ kubeadm_cert_key_command.stdout }}"
- ansible.builtin.template:
- src: templates/kubernetes/kubeadm-init.yml.j2
- dest: /var/tmp/kubernetes/kubeadm.yml
- owner: root
- group: root
- mode: 0644
- force: no
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - init
- - name: Execute kubeadm init on first master
- ansible.builtin.command: >-
- kubeadm init --config=/var/tmp/kubernetes/kubeadm.yml --upload-certs
- args:
- creates: /etc/kubernetes/admin.conf
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - init
- - name: Get generated CA certificate from first master
- community.crypto.x509_certificate_info:
- path: /etc/kubernetes/pki/ca.crt
- register: ca_cert
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - init
- - name: Generate kubeadm join configuration for all other nodes
- vars:
- addresses: "{{ vlan100_addresses }}"
- kubeadm_token: "{{ kubeadm_token_command.stdout }}"
- kubeadm_ca_cert_hash: "sha256:{{ ca_cert.public_key_fingerprints.sha256 | replace(':', '') }}"
- kubeadm_cert_key: "{{ kubeadm_cert_key_command.stdout }}"
- ansible.builtin.template:
- src: templates/kubernetes/kubeadm-join.yml.j2
- dest: /var/tmp/kubernetes/kubeadm.yml
- owner: root
- group: root
- mode: 0644
- force: no
- when: (k8s_master | default(False)) or (k8s_worker | default(False))
- tags:
- - init
- - name: Execute kubeadm join on all other nodes
- ansible.builtin.command: >-
- kubeadm join --config=/var/tmp/kubernetes/kubeadm.yml
- args:
- creates: /etc/kubernetes/kubelet.conf
- when: (k8s_master | default(False)) or (k8s_worker | default(False))
- tags:
- - join
- - name: Generate CoreDNS configmap on first master
- vars:
- addresses: "{{ vlan100_addresses }}"
- ansible.builtin.template:
- src: templates/kubernetes/coredns-configmap.yaml.j2
- dest: /var/tmp/kubernetes/coredns-configmap.yaml
- owner: root
- group: root
- mode: 0644
- force: no
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - dns
- - name: Apply CoreDNS configmap on first master
- ansible.builtin.command: >-
- kubectl apply -f /var/tmp/kubernetes/coredns-configmap.yaml
- --kubeconfig /etc/kubernetes/admin.conf
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - dns
- - name: Delete CoreDNS pods
- ansible.builtin.command: >-
- kubectl delete pod -n kube-system -l k8s-app=kube-dns --force
- --kubeconfig=/etc/kubernetes/admin.conf
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - dns
- - name: Remove scheduling taints from masters that are also workers
- ansible.builtin.command: >-
- kubectl taint nodes {{ k8s_node_name }} node-role.kubernetes.io/control-plane-
- --kubeconfig=/etc/kubernetes/admin.conf
- when: (k8s_master | default(False)) and (k8s_worker | default(True))
- ignore_errors: yes
- tags:
- - taints
- - name: Remove legacy scheduling taints from masters that are also workers
- ansible.builtin.command: >-
- kubectl taint nodes {{ k8s_node_name }} node-role.kubernetes.io/master-
- --kubeconfig=/etc/kubernetes/admin.conf
- when: (k8s_master | default(False)) and (k8s_worker | default(True))
- ignore_errors: yes
- tags:
- - taints
- - name: Create admin user account(s)
- ansible.builtin.command: >-
- kubeadm kubeconfig user --client-name {{ item }}@{{ lab_domain }}
- --config=/var/tmp/kubernetes/kubeadm.yml
- register: kubeconfig
- loop: "{{ k8s_admins | default(['memes']) }}"
- run_once: yes
- when: k8s_master | default(False)
- tags:
- - users
- - name: Write admin accounts YAML
- ansible.builtin.template:
- src: templates/kubernetes/admin-user-rbac.yml.j2
- dest: /var/tmp/kubernetes/{{ item.item }}-admin-rbac.yml
- owner: root
- group: root
- mode: 0644
- run_once: yes
- when: k8s_master | default(False) and item.rc == 0
- loop: "{{ kubeconfig.results }}"
- loop_control:
- label: "{{ item.item }}"
- tags:
- - users
- - name: Execute admin account bindings
- ansible.builtin.command: >-
- kubectl apply -f /var/tmp/kubernetes/{{ item.item }}-admin-rbac.yml
- --kubeconfig=/etc/kubernetes/admin.conf
- run_once: yes
- when: k8s_master | default(False) and item.rc == 0
- loop: "{{ kubeconfig.results }}"
- loop_control:
- label: "{{ item.item }}"
- tags:
- - users
- - name: Write user kubeconfig(s) locally
- ansible.builtin.template:
- src: templates/kubernetes/kubeconfig.user.j2
- dest: ../kubeconfig.{{ item.item }}
- force: yes
- mode: 0600
- run_once: yes
- become: no
- delegate_to: localhost
- when: item.rc == 0
- loop: "{{ kubeconfig.results }}"
- loop_control:
- label: "{{ item.item }}"
- tags:
- - users
- - name: Clean up kubeadm templates
- ansible.builtin.file:
- path: /var/tmp/kubernetes
- state: absent
- when: (k8s_master | default(False)) or (k8s_worker | default(False))
- tags:
- - never
diff --git a/ansible/longhorn.yml b/ansible/longhorn.yml
deleted file mode 100644
index ba7b765..0000000
--- a/ansible/longhorn.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Install kubernetes on hosts
-# yamllint disable rule:truthy rule:line-length
----
-- name: Prepare all hosts for longhorn
- hosts: servers
- become: yes
- tasks:
- - name: Install longhorn pre-requisites on Ubuntu
- ansible.builtin.package:
- name:
- - open-iscsi
- - nfs-common
- state: present
- when: ansible_distribution == "Ubuntu"
- tags:
- - base
- - name: Install longhorn pre-requisites on CentOS/RHEL
- ansible.builtin.package:
- name:
- - iscsi-initiator-utils
- - nfs-utils
- state: present
- when: ansible_os_family == "RedHat"
- tags:
- - base
- - name: Enable iscsid unit
- ansible.builtin.systemd:
- name: iscsid
- enabled: yes
- state: restarted
- daemon_reload: yes
- tags:
- - base
-- name: Install longhorn
- hosts: localhost
- become: no
- gather_facts: no
- tasks:
- - name: Deploy longhorn
- ansible.builtin.command: kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/{{ longhorn_version }}/deploy/longhorn.yaml
- tags:
- - init
- - name: Use longhorn as default storage class
- kubernetes.core.k8s:
- state: patched
- kind: StorageClass
- name: longhorn
- definition:
- metadata:
- annotations:
- storageclass.kubernetes.io/is-default-class: 'true'
- tags:
- - init
diff --git a/ansible/templates/kubernetes/admin-user-rbac.yml.j2 b/ansible/templates/kubernetes/admin-user-rbac.yml.j2
deleted file mode 100644
index 4d4728b..0000000
--- a/ansible/templates/kubernetes/admin-user-rbac.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-# Configure RBAC for {{ item.item }} as cluster admin
-{{ ansible_managed | comment }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: {{ item.item }}-cluster-admin
-subjects:
-- kind: User
- name: {{ item.item }}@{{ lab_domain }}
- apiGroup: rbac.authorization.k8s.io
-roleRef:
- kind: ClusterRole
- name: cluster-admin
- apiGroup: rbac.authorization.k8s.io
diff --git a/ansible/templates/kubernetes/coredns-configmap.yaml.j2 b/ansible/templates/kubernetes/coredns-configmap.yaml.j2
deleted file mode 100644
index 0d25906..0000000
--- a/ansible/templates/kubernetes/coredns-configmap.yaml.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-# Configure CoreDNS via Corefile
-{{ ansible_managed | comment }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: coredns
- namespace: kube-system
-data:
- Corefile: |
- .:53 {
- errors
- health {
- lameduck 5s
- }
- ready
- kubernetes cluster.local in-addr.arpa ip6.arpa {
- pods insecure
- fallthrough in-addr.arpa ip6.arpa
- ttl 30
- }
- prometheus :9153
- forward . {{ addresses | ansible.utils.ipv4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address') | unique | join(' ') }} {
- max_concurrent 1000
- }
- cache 30
- loop
- reload
- loadbalance
- }
diff --git a/ansible/templates/kubernetes/kube-apiserver0+json.yaml.j2 b/ansible/templates/kubernetes/kube-apiserver0+json.yaml.j2
deleted file mode 100644
index 16c51c9..0000000
--- a/ansible/templates/kubernetes/kube-apiserver0+json.yaml.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-# Kubeadm patch file to override kube-apiserver configuration for this node
-{{ ansible_managed | comment }}
-- op: add
- path: /spec/containers/0/command/-
- value: --bind-address={{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
diff --git a/ansible/templates/kubernetes/kubeadm-init.yml.j2 b/ansible/templates/kubernetes/kubeadm-init.yml.j2
deleted file mode 100644
index 0a57b3e..0000000
--- a/ansible/templates/kubernetes/kubeadm-init.yml.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-# Overide the default configuration for initial master node
-# Reference generated from kubeadm config print init-defaults --component-configs=KubeletConfiguration
-{{ ansible_managed | comment }}
-apiVersion: kubeadm.k8s.io/v1beta3
-kind: InitConfiguration
-patches:
- directory: /var/tmp/kubernetes
-bootstrapTokens:
- - groups:
- - system:bootstrappers:kubeadm:default-node-token
- token: {{ kubeadm_token }}
-certificateKey: {{ kubeadm_cert_key }}
-localAPIEndpoint:
- advertiseAddress: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
- bindPort: {{ kubernetes_api_port | default(6443) }}
-nodeRegistration:
- name: {{ k8s_node_name }}
- kubeletExtraArgs:
- node-ip: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | join(',') }}
- address: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
-skipPhases: {{ kubeadm_skip_phases | to_yaml }}
----
-apiVersion: kubeadm.k8s.io/v1beta3
-kind: ClusterConfiguration
-apiServer:
- certSANs:
- - {{ kubernetes_api_server }}
-clusterName: {{ kubernetes_cluster_name | default('lab') }}
-controlPlaneEndpoint: {{ kubernetes_api_address }}:{{ kubernetes_api_port | default(6443) }}
-networking:
- dnsDomain: {{ kubernetes_dns_domain | default('cluster.local') }}
- serviceSubnet: {{ kubernetes_service_cidr | default('10.1.0.0/22') }}
- podSubnet: {{ kubernetes_pod_cidr | default('10.0.0.0/16') }}
----
-apiVersion: kubelet.config.k8s.io/v1beta1
-kind: KubeletConfiguration
-cgroupDriver: {{ kubernetes_cgroup_driver | default('systemd') }}
-clusterDNS:
- - {{ kubernetes_dns_service | default('10.1.0.10') }}
-clusterDomain: {{ kubernetes_dns_domain | default('cluster.local') }}
diff --git a/ansible/templates/kubernetes/kubeadm-join.yml.j2 b/ansible/templates/kubernetes/kubeadm-join.yml.j2
deleted file mode 100644
index 9ed9c0a..0000000
--- a/ansible/templates/kubernetes/kubeadm-join.yml.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Overide the default configuration for joining nodes
-# Reference generated from kubeadm config print init-defaults --component-configs=KubeletConfiguration
-{{ ansible_managed | comment }}
-apiVersion: kubeadm.k8s.io/v1beta3
-kind: JoinConfiguration
-patches:
- directory: /var/tmp/kubernetes
-discovery:
- bootstrapToken:
- apiServerEndpoint: {{ kubernetes_api_address }}:{{ kubernetes_api_port | default(6443) }}
- token: {{ kubeadm_token }}
- caCertHashes:
- - "{{ kubeadm_ca_cert_hash }}"
-nodeRegistration:
- name: {{ k8s_node_name }}
- kubeletExtraArgs:
- node-ip: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | join(',') }}
- address: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
-{% if k8s_master | default(False) -%}
-controlPlane:
- localAPIEndpoint:
- advertiseAddress: {{ addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
- bindPort: {{ kubernetes_api_port | default(6443) }}
- certificateKey: {{ kubeadm_cert_key }}
-{%- endif %}
diff --git a/ansible/templates/kubernetes/kubeconfig.user.j2 b/ansible/templates/kubernetes/kubeconfig.user.j2
deleted file mode 100644
index 330eb70..0000000
--- a/ansible/templates/kubernetes/kubeconfig.user.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-# Kubeconfig for {{ item.item }}
-{{ ansible_managed | comment }}
----
-{{ item.stdout | from_yaml | to_nice_yaml(indent=2) }}
diff --git a/ansible/templates/kubernetes/nm-calico.conf.j2 b/ansible/templates/kubernetes/nm-calico.conf.j2
deleted file mode 100644
index b2a8b47..0000000
--- a/ansible/templates/kubernetes/nm-calico.conf.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-[keyfile]
-unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
diff --git a/ansible/templates/servers/modules-load.conf.j2 b/ansible/templates/servers/modules-load.conf.j2
deleted file mode 100644
index 1a0b600..0000000
--- a/ansible/templates/servers/modules-load.conf.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-# Persistent module configuration for {{ pb_name | default('unknown playbook') }}
-{{ ansible_managed | comment }}
-{% for mod in required_modules %}
-{{ mod }}
-{% endfor %}
diff --git a/foundations/README.md b/foundations/README.md
index 0d0a56a..f8b9d94 100644
--- a/foundations/README.md
+++ b/foundations/README.md
@@ -15,7 +15,7 @@ Accelerated GCP Lab. These resources will be created:
drive)
-
+
## Requirements
| Name | Version |
@@ -68,5 +68,5 @@ No modules.
| [vault\_bucket](#output\_vault\_bucket) | The randomly named GCS bucket that will be used for Vault storage. |
| [vault\_key](#output\_vault\_key) | The GCP service account JSON key file, base64 encoded, that must be used to authenticate as the Vault service account. |
| [vault\_sa](#output\_vault\_sa) | The GCP service account that has access to Vault GCS storage bucket. The Vault service will have to use this account for all GCS operations. |
-
+
diff --git a/inventory/group_vars/servers.yaml b/inventory/group_vars/servers.yaml
index 83d788a..25e1771 100644
--- a/inventory/group_vars/servers.yaml
+++ b/inventory/group_vars/servers.yaml
@@ -1,4 +1,3 @@
# Configure the hosts that are part of the servers group
-# yamllint disable rule:line-length
---
ansible_ssh_extra_args: '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
diff --git a/kubernetes.yml b/kubernetes.yml
index 6b44681..da494ca 100644
--- a/kubernetes.yml
+++ b/kubernetes.yml
@@ -1,5 +1,4 @@
# Provision kubernetes on bare metal hosts
-# yamllint disable rule:line-length
---
- name: Prepare requirements and install Kubernetes on bare-metal hosts
hosts: servers
@@ -63,25 +62,26 @@
tasks:
- name: Setup networking for VLAN 96
- include_role:
+ ansible.builtin.include_role:
name: networking
vars:
- vlans: "{{ hostvars[inventory_hostname]['vlans'] | combine(overrides | default({}), recursive=true) }}"
+ # yamllint disable-line rule:line-length
+ networking_vlans: "{{ hostvars[inventory_hostname]['vlans'] | combine(overrides | default({}), recursive=true) }}"
- name: Prepare hosts for kubernetes
- include_role:
- name: pre-kube
+ ansible.builtin.include_role:
+ name: pre_kube
- name: Flush pending handlers
ansible.builtin.meta: flush_handlers
- name: Reset connections
ansible.builtin.meta: reset_connection
- name: Install CRI-O
- include_role:
+ ansible.builtin.include_role:
name: crio
- name: Install Kubernetes with Kubeadm
- include_role:
+ ansible.builtin.include_role:
name: kubeadm
vars:
- node_addresses: "{{ vlans[96]['addresses'] }}"
+ kubeadm_node_addresses: "{{ vlans[96]['addresses'] }}"
- name: Flush pending handlers
ansible.builtin.meta: flush_handlers
- name: Remove scheduling taints from masters that are also workers
@@ -106,6 +106,7 @@
namespace: kube-system
definition:
data:
+ # yamllint disable rule:line-length
Corefile: |
.:53 {
errors
@@ -127,6 +128,7 @@
reload
loadbalance
}
+ # yamllint enable rule:line-length
delegate_to: 127.0.0.1
become: false
run_once: true
@@ -179,12 +181,13 @@
path: auth/k8s/config
data:
oidc_discovery_url: "https://{{ api_external_hostname }}:{{ api_external_port }}"
+ # yamllint disable-line rule:line-length
oidc_discovery_ca_pem: "{{ lookup('ansible.builtin.file', certificate_dir + './ca.chain.pem') | ansible.builtin.regex_replace('\r?\n+','\\n') }}"
delegate_to: 127.0.0.1
become: false
run_once: true
- name: Install Cilium
- include_role:
+ ansible.builtin.include_role:
name: cilium
# https://docs.cilium.io/en/stable/network/kubernetes/configuration/#crio
- name: Restart CRI-O to recognise Cilium
@@ -194,8 +197,8 @@
state: restarted
daemon_reload: false
- name: Install external-secrets
- include_role:
- name: external-secrets
+ ansible.builtin.include_role:
+ name: external_secrets
- name: Install Longhorn
- include_role:
+ ansible.builtin.include_role:
name: longhorn
diff --git a/provision/boot.yaml b/provision/boot.yaml
index 5101eec..abebf27 100644
--- a/provision/boot.yaml
+++ b/provision/boot.yaml
@@ -1,5 +1,4 @@
# (Re-)boot the lab servers
-# yamllint disable rule:line-length
---
- name: (Re-)boot servers
hosts: servers
diff --git a/provision/repave.yaml b/provision/repave.yaml
index 2c2cb5b..af98c1d 100644
--- a/provision/repave.yaml
+++ b/provision/repave.yaml
@@ -1,5 +1,4 @@
# Playbook to repave servers via PXE
-# yamllint disable rule:line-length
---
- name: Prepare for server repave from PXE
hosts: shared
diff --git a/provision/shutdown.yaml b/provision/shutdown.yaml
index 819b64c..eb7d001 100644
--- a/provision/shutdown.yaml
+++ b/provision/shutdown.yaml
@@ -1,5 +1,4 @@
# Shutdown the lab servers
-# yamllint disable rule:line-length
---
- name: Power off servers
hosts: servers
diff --git a/roles/cilium/defaults/main.yml b/roles/cilium/defaults/main.yml
index 7540b23..29eeece 100644
--- a/roles/cilium/defaults/main.yml
+++ b/roles/cilium/defaults/main.yml
@@ -1,24 +1,25 @@
+# defaults file for cilium; unless noted, these match the default values used by upstream cilium.
---
-# defaults file for cilium
-version: 1.13.2
+cilium_version: 1.13.2
# The endpoint should be the VIP and port that can distribute requests to any
# master node
-api_external_name:
-api_external_port: 6443
-dataplane_device:
+cilium_api_external_name:
+cilium_api_external_port: 6443
+cilium_dataplane_device:
# BGP peer must be the address of the peer
-bgp_peer:
+cilium_bgp_peer:
# Default values for BGP
-bgp_asn: 64513
-bgp_peer_asn: 64512
-certificate_dir: ../../certs/kubernetes/
+cilium_bgp_asn: 64513
+cilium_bgp_peer_asn: 64512
+cilium_certificate_dir: ../../certs/kubernetes/
# These should be overridden to match the values provided when provisioning
# Kubernetes
-ipv4_native_routing_cidr: 10.0.0.0/8
-ipv4_node_mask: 24
-ipv6_node_mask: 112
-lb_cidrs: []
-pod_cidrs:
+cilium_ipv4_native_routing_cidr: 10.0.0.0/8
+# TODO @memes - node masks are not used in current cilium role; remove or find out why they are here.
+cilium_ipv4_node_mask: 24
+cilium_ipv6_node_mask: 112
+cilium_lb_cidrs: []
+cilium_pod_cidrs:
- 10.244.0.0/16
-service_cidrs:
+cilium_service_cidrs:
- 10.96.0.0/12
diff --git a/roles/cilium/meta/main.yml b/roles/cilium/meta/main.yml
index a358e61..4e29f09 100644
--- a/roles/cilium/meta/main.yml
+++ b/roles/cilium/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Matthew Emes
- description: Install cillium to kubernetes
+ description: Install cilium to kubernetes
license: MIT
min_ansible_version: '2.1'
galaxy_tags: []
diff --git a/roles/cilium/tasks/main.yml b/roles/cilium/tasks/main.yml
index 75eff81..148dcbf 100644
--- a/roles/cilium/tasks/main.yml
+++ b/roles/cilium/tasks/main.yml
@@ -1,12 +1,12 @@
# Install Cilium as CNI
-# yamllint disable rule:line-length
---
-- set_fact:
+- name: Build a list of cilium CIDR pools for load-balancer CIDRs
+ ansible.builtin.set_fact:
lb_cidr_pool: "{{ lb_cidr_pool | default([]) + [{'cidr': item}] }}"
- loop: "{{ lb_cidrs }}"
+ loop: "{{ cilium_lb_cidrs }}"
- name: Label nodes to accept default BGP peering policy
kubernetes.core.k8s:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ cilium_certificate_dir }}/ca.chain.pem"
state: patched
kind: Node
definition:
@@ -25,10 +25,10 @@
run_once: true
- name: Install Cilium from chart
kubernetes.core.helm:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ cilium_certificate_dir }}/ca.chain.pem"
name: cilium
chart_ref: cilium/cilium
- chart_version: "{{ version }}"
+ chart_version: "{{ cilium_version }}"
update_repo_cache: false
force: true
wait: true
@@ -47,23 +47,24 @@
integration: crio
enableIPv4Masquerade: false
enableIPv6Masquerade: false
- extraArgs: "{{ ['--devices=%s' | format(dataplane_device)] if dataplane_device else [] | to_yaml }}"
+ extraArgs: "{{ ['--devices=%s' | format(cilium_dataplane_device)] if cilium_dataplane_device else [] | to_yaml }}"
extraConfig:
- enable-ipv6-ndp: "{{ ((pod_cidrs + service_cidrs + lb_cidrs) | ansible.utils.ipv6 | length > 0) | string | lower }}"
- ipv6-mcast-device: "{{ dataplane_device }}"
- ipv6-service-range: "{{ service_cidrs | ansible.utils.ipv6 | default(['auto'], true) | join(',') }}"
+ # yamllint disable-line rule:line-length
+ enable-ipv6-ndp: "{{ ((cilium_pod_cidrs + cilium_service_cidrs + cilium_lb_cidrs) | ansible.utils.ipv6 | length > 0) | string | lower }}"
+ ipv6-mcast-device: "{{ cilium_dataplane_device }}"
+ ipv6-service-range: "{{ cilium_service_cidrs | ansible.utils.ipv6 | default(['auto'], true) | join(',') }}"
ipv4:
- enabled: "{{ (pod_cidrs + service_cidrs + lb_cidrs) | ansible.utils.ipv4 | length > 0 }}"
- ipv4NativeRoutingCIDR: "{{ ipv4_native_routing_cidr }}"
+ enabled: "{{ (cilium_pod_cidrs + cilium_service_cidrs + cilium_lb_cidrs) | ansible.utils.ipv4 | length > 0 }}"
+ ipv4NativeRoutingCIDR: "{{ cilium_ipv4_native_routing_cidr }}"
ipv6:
- enabled: "{{ (pod_cidrs + service_cidrs + lb_cidrs) | ansible.utils.ipv6 | length > 0 }}"
+ enabled: "{{ (cilium_pod_cidrs + cilium_service_cidrs + cilium_lb_cidrs) | ansible.utils.ipv6 | length > 0 }}"
ipam:
mode: kubernetes
k8s:
- requireIPv4PodCidr: "{{ pod_cidrs | ansible.utils.ipv4 | length > 0 }}"
- requireIPv6PodCidr: "{{ pod_cidrs | ansible.utils.ipv6 | length > 0 }}"
- k8sServiceHost: "{{ api_external_hostname }}"
- k8sServicePort: "{{ api_external_port }}"
+ requireIPv4PodCidr: "{{ cilium_pod_cidrs | ansible.utils.ipv4 | length > 0 }}"
+ requireIPv6PodCidr: "{{ cilium_pod_cidrs | ansible.utils.ipv6 | length > 0 }}"
+ k8sServiceHost: "{{ cilium_api_external_name }}"
+ k8sServicePort: "{{ cilium_api_external_port }}"
kubeProxyReplacement: strict
tunnel: disabled
delegate_to: 127.0.0.1
@@ -71,7 +72,7 @@
run_once: true
- name: Wait for Cilium to be ready
kubernetes.core.k8s_info:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ cilium_certificate_dir }}/ca.chain.pem"
kind: DaemonSet
name: cilium
namespace: kube-system
@@ -84,7 +85,7 @@
run_once: true
- name: Create load-balancer IP pool
kubernetes.core.k8s:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ cilium_certificate_dir }}/ca.chain.pem"
state: present
definition:
apiVersion: cilium.io/v2alpha1
@@ -98,7 +99,7 @@
run_once: true
- name: Create BGP peering policy
kubernetes.core.k8s:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ cilium_certificate_dir }}/ca.chain.pem"
state: present
definition:
apiVersion: cilium.io/v2alpha1
@@ -110,11 +111,11 @@
matchLabels:
lab.acceleratedgcp.com/bgp-peering-policy: default-peering-policy
virtualRouters:
- - localASN: "{{ bgp_asn }}"
+ - localASN: "{{ cilium_bgp_asn }}"
exportPodCIDR: true
neighbors:
- - peerAddress: "{{ bgp_peer | ansible.utils.ipaddr('host') }}"
- peerASN: "{{ bgp_peer_asn }}"
+ - peerAddress: "{{ cilium_bgp_peer | ansible.utils.ipaddr('host') }}"
+ peerASN: "{{ cilium_bgp_peer_asn }}"
eBGPMultihopTTL: 10
connectRetryTimeSeconds: 120
holdTimeSeconds: 90
diff --git a/roles/crio/defaults/main.yml b/roles/crio/defaults/main.yml
index 7783f97..6053bcb 100644
--- a/roles/crio/defaults/main.yml
+++ b/roles/crio/defaults/main.yml
@@ -1,3 +1,3 @@
---
# defaults file for cri-o
-version: '1.27.0'
+crio_version: '1.27.0'
diff --git a/roles/crio/tasks/centos.yml b/roles/crio/tasks/centos.yml
index 63bf11f..7ccbdfe 100644
--- a/roles/crio/tasks/centos.yml
+++ b/roles/crio/tasks/centos.yml
@@ -1,16 +1,17 @@
# Perform CentOS specific tasks for CRI-O
-# yamllint disable rule:line-length
---
- name: Add CentOS CRI-O package repo
ansible.builtin.get_url:
- url: "{{ base_url }}stable:/cri-o:/{{ repo_version }}/CentOS_{{ ansible_distribution_major_version }}/devel:kubic:libcontainers:stable:cri-o:{{ repo_version | replace('/', '') }}.repo"
+ # yamllint disable-line rule:line-length
+ url: "{{ crio_base_url }}stable:/cri-o:/{{ crio_repo_version }}/CentOS_{{ ansible_distribution_major_version }}/devel:kubic:libcontainers:stable:cri-o:{{ crio_repo_version | replace('/', '') }}.repo"
dest: /etc/yum.repos.d/crio.repo
owner: root
group: root
mode: '0644'
- name: Add CentOS CRI-O stable packages repo
ansible.builtin.get_url:
- url: "{{ base_url }}stable/CentOS_{{ ansible_distribution_major_version }}/devel:kubic:libcontainers:stable.repo"
+ # yamllint disable-line rule:line-length
+ url: "{{ crio_base_url }}stable/CentOS_{{ ansible_distribution_major_version }}/devel:kubic:libcontainers:stable.repo"
dest: /etc/yum.repos.d/crio_stable.repo
owner: root
group: root
diff --git a/roles/crio/tasks/debian.yml b/roles/crio/tasks/debian.yml
index 6bdfe9f..9f8bc04 100644
--- a/roles/crio/tasks/debian.yml
+++ b/roles/crio/tasks/debian.yml
@@ -1,16 +1,16 @@
# Perform Debian specific tasks for CRI-O
-# yamllint disable rule:line-length
---
- name: Fetch repo key for CRI-O apt packages
ansible.builtin.get_url:
- url: "{{ base_url }}stable:/cri-o:/{{ repo_version }}/{{ apt_repo_mapping[ansible_distribution] }}/Release.key"
+ # yamllint disable-line rule:line-length
+ url: "{{ crio_base_url }}stable:/cri-o:/{{ crio_repo_version }}/{{ crio_apt_repo_mapping[ansible_distribution] }}/Release.key"
dest: /var/tmp/crio/crio.key
owner: root
group: root
mode: '0640'
- name: Fetch repo key for stable CRI-O apt packages
ansible.builtin.get_url:
- url: "{{ base_url }}stable/{{ apt_repo_mapping[ansible_distribution] }}/Release.key"
+ url: "{{ crio_base_url }}stable/{{ crio_apt_repo_mapping[ansible_distribution] }}/Release.key"
dest: /var/tmp/crio/crio_stable.key
owner: root
group: root
@@ -38,13 +38,15 @@
- crio_stable.gpg
- name: Add CRI-O apt package repo
ansible.builtin.apt_repository:
- repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/crio.gpg] {{ base_url }}stable:/cri-o:/{{ repo_version }}/{{ apt_repo_mapping[ansible_distribution] }}/ /"
+ # yamllint disable-line rule:line-length
+ repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/crio.gpg] {{ crio_base_url }}stable:/cri-o:/{{ crio_repo_version }}/{{ crio_apt_repo_mapping[ansible_distribution] }}/ /"
state: present
filename: crio
update_cache: true
- name: Add CRI-O stable apt package repo
ansible.builtin.apt_repository:
- repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/crio_stable.gpg] {{ base_url }}stable/{{ apt_repo_mapping[ansible_distribution] }}/ /"
+ # yamllint disable-line rule:line-length
+ repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/crio_stable.gpg] {{ crio_base_url }}stable/{{ crio_apt_repo_mapping[ansible_distribution] }}/ /"
state: present
filename: crio_stable
update_cache: true
diff --git a/roles/crio/tasks/main.yml b/roles/crio/tasks/main.yml
index e33ff65..6f420a3 100644
--- a/roles/crio/tasks/main.yml
+++ b/roles/crio/tasks/main.yml
@@ -1,5 +1,4 @@
# Install CRI-O runtime
-# yamllint disable rule:line-length
---
- name: Configure persistent modules for CRI-O
ansible.builtin.template:
@@ -12,8 +11,8 @@
community.general.modprobe:
name: "{{ item }}"
state: present
- loop: "{{ required_modules }}"
-- name: Create temp crio config directory
+ loop: "{{ crio_required_modules }}"
+- name: Create temp CRI-O config directory
ansible.builtin.file:
path: /var/tmp/crio
state: directory
@@ -28,7 +27,7 @@
when: ansible_distribution == "CentOS"
- name: Install CRI-O packages
ansible.builtin.package:
- name: "{{ packages[ansible_os_family] }}"
+ name: "{{ crio_packages[ansible_os_family] }}"
state: present
- name: Create CRI-O unit override directory
ansible.builtin.file:
diff --git a/roles/crio/templates/modules-load.conf.j2 b/roles/crio/templates/modules-load.conf.j2
index 8d4da38..79afe26 100644
--- a/roles/crio/templates/modules-load.conf.j2
+++ b/roles/crio/templates/modules-load.conf.j2
@@ -1,5 +1,5 @@
# Persistent module configuration for CRI-O
{{ ansible_managed | comment }}
-{% for mod in required_modules %}
+{% for mod in crio_required_modules %}
{{ mod }}
{% endfor %}
diff --git a/roles/crio/vars/main.yml b/roles/crio/vars/main.yml
index a8ae72a..67b6fb6 100644
--- a/roles/crio/vars/main.yml
+++ b/roles/crio/vars/main.yml
@@ -1,16 +1,15 @@
-# yamllint disable rule:line-length
---
-required_modules:
+crio_required_modules:
- overlay
-maj_minor: "{{ version.split('.')[:2] | join('.') }}"
-repo_version: "{{ maj_minor }}{% if maj_minor != version %}:/{{ version }}{% endif %}"
-base_url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/
-packages:
+crio_maj_minor: "{{ crio_version.split('.')[:2] | join('.') }}"
+crio_repo_version: "{{ crio_maj_minor }}{% if crio_maj_minor != crio_version %}:/{{ crio_version }}{% endif %}"
+crio_base_url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/
+crio_packages:
Debian:
- cri-o
- cri-o-runc
RedHat:
- cri-o
-apt_repo_mapping:
+crio_apt_repo_mapping:
Debian: "Debian_{{ ansible_distribution_version }}"
Ubuntu: "xUbuntu_{{ ansible_distribution_version }}"
diff --git a/roles/external-secrets/defaults/main.yml b/roles/external-secrets/defaults/main.yml
deleted file mode 100644
index 4d32b5c..0000000
--- a/roles/external-secrets/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for external-secrets
-version: 0.8.1
diff --git a/roles/external-secrets/README.md b/roles/external_secrets/README.md
similarity index 100%
rename from roles/external-secrets/README.md
rename to roles/external_secrets/README.md
diff --git a/roles/external_secrets/defaults/main.yml b/roles/external_secrets/defaults/main.yml
new file mode 100644
index 0000000..cc35f50
--- /dev/null
+++ b/roles/external_secrets/defaults/main.yml
@@ -0,0 +1,4 @@
+# defaults file for external-secrets
+---
+external_secrets_version: 0.8.1
+external_secrets_certificate_dir: ../../certs/kubernetes/
diff --git a/roles/external-secrets/handlers/main.yml b/roles/external_secrets/handlers/main.yml
similarity index 100%
rename from roles/external-secrets/handlers/main.yml
rename to roles/external_secrets/handlers/main.yml
diff --git a/roles/external-secrets/meta/main.yml b/roles/external_secrets/meta/main.yml
similarity index 100%
rename from roles/external-secrets/meta/main.yml
rename to roles/external_secrets/meta/main.yml
diff --git a/roles/external-secrets/tasks/main.yml b/roles/external_secrets/tasks/main.yml
similarity index 82%
rename from roles/external-secrets/tasks/main.yml
rename to roles/external_secrets/tasks/main.yml
index 4c3dd20..8e9a39f 100644
--- a/roles/external-secrets/tasks/main.yml
+++ b/roles/external_secrets/tasks/main.yml
@@ -9,10 +9,10 @@
run_once: true
- name: Install external-secrets from chart
kubernetes.core.helm:
- ca_cert: "{{ certificate_dir }}/ca.chain.pem"
+ ca_cert: "{{ external_secrets_certificate_dir }}/ca.chain.pem"
name: external-secrets
chart_ref: external-secrets/external-secrets
- chart_version: "{{ version }}"
+ chart_version: "{{ external_secrets_version }}"
create_namespace: true
update_repo_cache: false
force: true
diff --git a/roles/external-secrets/tests/inventory b/roles/external_secrets/tests/inventory
similarity index 100%
rename from roles/external-secrets/tests/inventory
rename to roles/external_secrets/tests/inventory
diff --git a/roles/external-secrets/tests/test.yml b/roles/external_secrets/tests/test.yml
similarity index 69%
rename from roles/external-secrets/tests/test.yml
rename to roles/external_secrets/tests/test.yml
index a5f756d..05cf7f8 100644
--- a/roles/external-secrets/tests/test.yml
+++ b/roles/external_secrets/tests/test.yml
@@ -2,4 +2,4 @@
- hosts: localhost
remote_user: root
roles:
- - external-secrets
+ - external_secrets
diff --git a/roles/external-secrets/vars/main.yml b/roles/external_secrets/vars/main.yml
similarity index 100%
rename from roles/external-secrets/vars/main.yml
rename to roles/external_secrets/vars/main.yml
diff --git a/roles/kubeadm/defaults/main.yml b/roles/kubeadm/defaults/main.yml
index 7e3c4b3..3e9547b 100644
--- a/roles/kubeadm/defaults/main.yml
+++ b/roles/kubeadm/defaults/main.yml
@@ -1,18 +1,21 @@
---
-version: 1.27.1
+kubeadm_version: 1.27.1
# The endpoint should resolve to the VIP and port that can distribute requests
# to any master node
-api_external_hostname:
-api_external_port: 6443
+kubeadm_api_external_hostname:
+kubeadm_api_external_port: 6443
# These match the defaults in kubeadm and should be overridden
-api_bind_port: 6443
-cluster_name: kubernetes
-dns_domain: cluster.local
-ipv4_node_mask: 24
-ipv6_node_mask: 112
-pod_cidrs:
+kubeadm_api_bind_port: 6443
+kubeadm_cluster_name: kubernetes
+kubeadm_dns_domain: cluster.local
+kubeadm_ipv4_node_mask: 24
+kubeadm_ipv6_node_mask: 112
+kubeadm_pod_cidrs:
- 10.244.0.0/16
-service_cidrs:
+kubeadm_service_cidrs:
- 10.96.0.0/12
-skip_phases: []
-certificate_dir: ../../certs/kubernetes/
+kubeadm_skip_phases: []
+kubeadm_certificate_dir: ../../certs/kubernetes/
+kubeadm_admins: []
+kubeadm_dns_service:
+kubeadm_node_addresses: []
diff --git a/roles/kubeadm/handlers/main.yml b/roles/kubeadm/handlers/main.yml
index ef3b8b4..ed97d53 100644
--- a/roles/kubeadm/handlers/main.yml
+++ b/roles/kubeadm/handlers/main.yml
@@ -1,5 +1 @@
---
-- name: reboot
- ansible.builtin.reboot:
- post_reboot_delay: 120
- reboot_timeout: 300
diff --git a/roles/kubeadm/tasks/common.yml b/roles/kubeadm/tasks/common.yml
index b661a5a..5d57d8c 100644
--- a/roles/kubeadm/tasks/common.yml
+++ b/roles/kubeadm/tasks/common.yml
@@ -3,15 +3,15 @@
ansible.builtin.file:
path: /etc/kubernetes/pki/etcd
state: directory
- mode: 0755
+ mode: '0755'
owner: root
group: root
- name: Copy Kubernetes CA certificates
ansible.builtin.copy:
- src: "{{ certificate_dir }}/{{ item }}"
+ src: "{{ kubeadm_certificate_dir }}/{{ item }}"
dest: /etc/kubernetes/pki/
force: true
- mode: 0644
+ mode: '0644'
owner: root
group: root
loop:
@@ -20,10 +20,10 @@
- sa.pub
- name: Copy Kubernetes CA keys
ansible.builtin.copy:
- src: "{{ certificate_dir }}/{{ item }}"
+ src: "{{ kubeadm_certificate_dir }}/{{ item }}"
dest: /etc/kubernetes/pki/
force: true
- mode: 0600
+ mode: '0600'
owner: root
group: root
loop:
@@ -32,18 +32,18 @@
- sa.key
- name: Copy Kubernetes etcd CA certificate
ansible.builtin.copy:
- src: "{{ certificate_dir }}/etcd-ca.crt"
+ src: "{{ kubeadm_certificate_dir }}/etcd-ca.crt"
dest: /etc/kubernetes/pki/etcd/ca.crt
force: true
- mode: 0644
+ mode: '0644'
owner: root
group: root
- name: Copy Kubernetes etcd CA key
ansible.builtin.copy:
- src: "{{ certificate_dir }}/etcd-ca.key"
+ src: "{{ kubeadm_certificate_dir }}/etcd-ca.key"
dest: /etc/kubernetes/pki/etcd/ca.key
force: true
- mode: 0600
+ mode: '0600'
owner: root
group: root
- name: Create temp kubeadm config directory
diff --git a/roles/kubeadm/tasks/debian.yml b/roles/kubeadm/tasks/debian.yml
index d9232e4..c05aba8 100644
--- a/roles/kubeadm/tasks/debian.yml
+++ b/roles/kubeadm/tasks/debian.yml
@@ -1,5 +1,4 @@
# Perform Debian/Ubuntu specific tasks common to all hosts
-# yamllint disable rule:line-length
---
- name: Fetch repo key for kubernetes apt packages
ansible.builtin.get_url:
@@ -30,9 +29,9 @@
ansible.builtin.apt:
name:
- cri-tools
- - "kubectl={{ version }}-00"
- - "kubelet={{ version }}-00"
- - "kubeadm={{ version }}-00"
+ - "kubectl={{ kubeadm_version }}-00"
+ - "kubelet={{ kubeadm_version }}-00"
+ - "kubeadm={{ kubeadm_version }}-00"
state: present
allow_change_held_packages: false
allow_downgrade: false
diff --git a/roles/kubeadm/tasks/init.yml b/roles/kubeadm/tasks/init.yml
index e4521c7..f5c0bcd 100644
--- a/roles/kubeadm/tasks/init.yml
+++ b/roles/kubeadm/tasks/init.yml
@@ -1,4 +1,3 @@
-# yamllint disable rule:line-length
---
- name: Generate kubeadm token
ansible.builtin.command: kubeadm token generate
@@ -6,8 +5,7 @@
creates: /var/tmp/kubeadm/config.yml
register: token_command
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
changed_when: token_command.rc == 0 and token_command.stdout != ""
- name: Generate kubeadm cert key, if needed
ansible.builtin.command: kubeadm certs certificate-key
@@ -15,14 +13,13 @@
creates: /var/tmp/kubeadm/config.yml
register: cert_key_command
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
- changed_when: cert_key_command.rc == 0 and token_command.stdout != ""
+ changed_when: cert_key_command.rc == 0 and cert_key_command.stdout != ""
- name: Generate kubeadm init configuration
vars:
token: "{{ token_command.stdout }}"
cert_key: "{{ cert_key_command.stdout }}"
- external_hostname: "{{ api_external_hostname }}"
+ external_hostname: "{{ kubeadm_api_external_hostname }}"
node_name: "{{ k8s.node_name }}"
ansible.builtin.template:
src: templates/kubeadm-init.yml.j2
@@ -32,23 +29,20 @@
mode: '0644'
force: false
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
- name: Execute kubeadm init
ansible.builtin.command: >-
kubeadm init --config=/var/tmp/kubeadm/config.yml
args:
creates: /etc/kubernetes/admin.conf
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
- name: Read the generated kubeadm initialisation file
ansible.builtin.slurp:
src: /var/tmp/kubeadm/config.yml
register: init_kubeadm
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
- name: Get CA certificate info
community.crypto.x509_certificate_info:
path: "{{ certificate_dir }}/ca.crt"
@@ -63,17 +57,15 @@
cert_key: "{{ (init_kubeadm.content | b64decode | from_yaml_all)[0].certificateKey }}"
token: "{{ (init_kubeadm.content | b64decode | from_yaml_all)[0].bootstrapTokens[0].token }}"
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
-- name: Create admin user account(s)
+ delegate_to: "{{ kubeadm_init_target }}"
+- name: Create admin user account(s) # noqa: no-changed-when # Always create the admin accounts
ansible.builtin.command: >-
- kubeadm kubeconfig user --client-name {{ item }}@{{ lab_domain }}
+ kubeadm kubeconfig user --client-name {{ item }}@{{ kubeadm_dns_domain }}
--config=/var/tmp/kubeadm/config.yml
register: kubeconfig
- loop: "{{ k8s_admins | default(['memes']) }}"
+ loop: "{{ kubeadm_admins | default(['memes']) }}"
run_once: true
- delegate_to: "{{ init_target }}"
- # when: inventory_hostname == init_target
+ delegate_to: "{{ kubeadm_init_target }}"
- name: Write admin accounts YAML
vars:
user: "{{ item.item }}"
@@ -84,17 +76,17 @@
group: root
mode: '0644'
run_once: true
- delegate_to: "{{ init_target }}"
+ delegate_to: "{{ kubeadm_init_target }}"
when: item.rc == 0
loop: "{{ kubeconfig.results }}"
loop_control:
label: "{{ item.item }}"
-- name: Execute admin account bindings
+- name: Execute admin account bindings # noqa: no-changed-when # Always update the admin accounts
ansible.builtin.command: >-
kubectl apply -f /var/tmp/kubeadm/{{ item.item }}-admin-rbac.yml
--kubeconfig=/etc/kubernetes/admin.conf
run_once: true
- delegate_to: "{{ init_target }}"
+ delegate_to: "{{ kubeadm_init_target }}"
when: item.rc == 0
loop: "{{ kubeconfig.results }}"
loop_control:
diff --git a/roles/kubeadm/tasks/join.yml b/roles/kubeadm/tasks/join.yml
index 514b0c7..110e235 100644
--- a/roles/kubeadm/tasks/join.yml
+++ b/roles/kubeadm/tasks/join.yml
@@ -13,10 +13,10 @@
group: root
mode: '0644'
force: false
- when: inventory_hostname != init_target
+ when: inventory_hostname != kubeadm_init_target
- name: Execute kubeadm join on nodes
ansible.builtin.command: >-
kubeadm join --config=/var/tmp/kubeadm/config.yml
args:
creates: /etc/kubernetes/kubelet.conf
- when: inventory_hostname != init_target
+ when: inventory_hostname != kubeadm_init_target
diff --git a/roles/kubeadm/tasks/rhel.yml b/roles/kubeadm/tasks/rhel.yml
index 7eb0dab..b6f83be 100644
--- a/roles/kubeadm/tasks/rhel.yml
+++ b/roles/kubeadm/tasks/rhel.yml
@@ -1,5 +1,4 @@
# Perform CentOS/RHEL specific tasks common to all hosts
-# yamllint disable rule:line-length
---
- name: Add Kubernetes yum repo
ansible.builtin.yum_repository:
@@ -10,12 +9,13 @@
enabled: true
gpgcheck: true
repo_gpgcheck: true
+ # yamllint disable-line rule:line-length
gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- name: Install required packages
ansible.builtin.package:
name:
- cri-tools
- iproute-tc
- - "kubectl-{{ version }}"
- - "kubelet-{{ version }}"
- - "kubeadm-{{ version }}"
+ - "kubectl-{{ kubeadm_version }}"
+ - "kubelet-{{ kubeadm_version }}"
+ - "kubeadm-{{ kubeadm_version }}"
diff --git a/roles/kubeadm/templates/admin-user-rbac.yml.j2 b/roles/kubeadm/templates/admin-user-rbac.yml.j2
index 4415c81..12631c3 100644
--- a/roles/kubeadm/templates/admin-user-rbac.yml.j2
+++ b/roles/kubeadm/templates/admin-user-rbac.yml.j2
@@ -7,7 +7,7 @@ metadata:
name: {{ user }}-cluster-admin
subjects:
- kind: User
- name: {{ user }}@{{ lab_domain | default('lab.acceleratedgcp.com') }}
+ name: {{ user }}@{{ kubeadm_dns_domain | default('lab.acceleratedgcp.com') }}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
diff --git a/roles/kubeadm/templates/kube-apiserver0+json.yaml.j2 b/roles/kubeadm/templates/kube-apiserver0+json.yaml.j2
index cced00d..1b17eca 100644
--- a/roles/kubeadm/templates/kube-apiserver0+json.yaml.j2
+++ b/roles/kubeadm/templates/kube-apiserver0+json.yaml.j2
@@ -2,4 +2,4 @@
{{ ansible_managed | comment }}
- op: add
path: /spec/containers/0/command/-
- value: --bind-address={{ node_addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
+ value: --bind-address={{ kubeadm_node_addresses | ansible.utils.ipv4('host/prefix') | ansible.utils.ipv4('address') | first }}
diff --git a/roles/kubeadm/templates/kubeadm-init.yml.j2 b/roles/kubeadm/templates/kubeadm-init.yml.j2
index 654f2d1..54d27e4 100644
--- a/roles/kubeadm/templates/kubeadm-init.yml.j2
+++ b/roles/kubeadm/templates/kubeadm-init.yml.j2
@@ -11,14 +11,14 @@ bootstrapTokens:
token: {{ token }}
certificateKey: {{ cert_key }}
localAPIEndpoint:
- advertiseAddress: "{{ node_addresses | ansible.utils.ipaddr('address') | first }}"
- bindPort: {{ api_bind_port }}
+ advertiseAddress: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | first }}"
+ bindPort: {{ kubeadm_api_bind_port }}
nodeRegistration:
name: {{ node_name }}
kubeletExtraArgs:
- node-ip: "{{ node_addresses | ansible.utils.ipaddr('address') | join(',') }}"
- address: "{{ node_addresses | ansible.utils.ipaddr('address') | first }}"
-skipPhases: {{ skip_phases['init'] | default([]) | to_yaml }}
+ node-ip: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | join(',') }}"
+ address: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | first }}"
+skipPhases: {{ kubeadm_skip_phases['init'] | default([]) | to_yaml }}
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
@@ -26,21 +26,21 @@ apiServer:
certSANs:
- {{ external_hostname }}
extraArgs:
- external-hostname: {{ api_external_hostname }}
- service-account-issuer: "https://{{ api_external_hostname }}:{{ api_external_port }}"
+ external-hostname: {{ kubeadm_api_external_hostname }}
+ service-account-issuer: "https://{{ kubeadm_api_external_hostname }}:{{ kubeadm_api_external_port }}"
controllerManager:
- extraArgs: {{ ipv4_extra_args | combine(ipv6_extra_args) | to_yaml }}
-clusterName: {{ cluster_name }}
-controlPlaneEndpoint: "{{ api_external_hostname }}:{{ api_external_port }}"
-kubernetesVersion: {{ version }}
+ extraArgs: {{ kubeadm_ipv4_extra_args | combine(kubeadm_ipv6_extra_args) | to_yaml }}
+clusterName: {{ kubeadm_cluster_name }}
+controlPlaneEndpoint: "{{ kubeadm_api_external_hostname }}:{{ kubeadm_api_external_port }}"
+kubernetesVersion: {{ kubeadm_version }}
networking:
- dnsDomain: {{ dns_domain }}
- serviceSubnet: "{{ service_cidrs | join(',') }}"
- podSubnet: "{{ pod_cidrs | join(',') }}"
+ dnsDomain: {{ kubeadm_dns_domain }}
+ serviceSubnet: "{{ kubeadm_service_cidrs | join(',') }}"
+ podSubnet: "{{ kubeadm_pod_cidrs | join(',') }}"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
-cgroupDriver: {{ cgroup_driver }}
+cgroupDriver: {{ kubeadm_cgroup_driver }}
clusterDNS:
- - "{{ dns_service | default(service_cidrs | first | ansible.utils.nthhost(10)) }}"
-clusterDomain: {{ dns_domain }}
+ - "{{ kubeadm_dns_service | default(kubeadm_service_cidrs | first | ansible.utils.nthhost(10)) }}"
+clusterDomain: {{ kubeadm_dns_domain }}
diff --git a/roles/kubeadm/templates/kubeadm-join.yml.j2 b/roles/kubeadm/templates/kubeadm-join.yml.j2
index 0efae34..ef2fbdb 100644
--- a/roles/kubeadm/templates/kubeadm-join.yml.j2
+++ b/roles/kubeadm/templates/kubeadm-join.yml.j2
@@ -14,13 +14,13 @@ discovery:
nodeRegistration:
name: {{ node_name }}
kubeletExtraArgs:
- node-ip: "{{ node_addresses | ansible.utils.ipaddr('address') | join(',') }}"
- address: "{{ node_addresses | ansible.utils.ipaddr('address') | first }}"
+ node-ip: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | join(',') }}"
+ address: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | first }}"
-skipPhases: {{ skip_phases['join'] | default([]) | to_yaml }}
+skipPhases: {{ kubeadm_skip_phases['join'] | default([]) | to_yaml }}
{% if is_master | default(False) -%}
controlPlane:
localAPIEndpoint:
- advertiseAddress: "{{ node_addresses | ansible.utils.ipaddr('address') | first }}"
+ advertiseAddress: "{{ kubeadm_node_addresses | ansible.utils.ipaddr('address') | first }}"
- bindPort: {{ api_bind_port }}
+ bindPort: {{ kubeadm_api_bind_port }}
certificateKey: "{{ cert_key }}"
{%- endif %}
diff --git a/roles/kubeadm/vars/main.yml b/roles/kubeadm/vars/main.yml
index 7bf815a..553784d 100644
--- a/roles/kubeadm/vars/main.yml
+++ b/roles/kubeadm/vars/main.yml
@@ -1,9 +1,9 @@
# yamllint disable rule:line-length
---
-cgroup_driver: systemd
-ipv4_extra_args: "{{ {'node-cidr-mask-size-ipv4': ipv4_node_mask | string} if (pod_cidrs | ansible.utils.ipv4 | length > 0) else {} }}"
-ipv6_extra_args: "{{ {'node-cidr-mask-size-ipv6': ipv6_node_mask | string} if (pod_cidrs | ansible.utils.ipv6 | length > 0) else {} }}"
+kubeadm_cgroup_driver: systemd
+kubeadm_ipv4_extra_args: "{{ {'node-cidr-mask-size-ipv4': ipv4_node_mask | string} if (pod_cidrs | ansible.utils.ipv4 | length > 0) else {} }}"
+kubeadm_ipv6_extra_args: "{{ {'node-cidr-mask-size-ipv6': ipv6_node_mask | string} if (pod_cidrs | ansible.utils.ipv6 | length > 0) else {} }}"
# Consistently choose a single host as the kubeadm init target in preference
# to run_once, etc. This helps the role to recover from temporary errors by
# reusing the same token and cert hashes already created.
-init_target: "{{ ansible_play_hosts_all | map('extract', hostvars) | selectattr('k8s', 'defined') | selectattr('k8s.master', 'defined') | selectattr('k8s.master') | map(attribute='inventory_hostname') | list | sort | first }}"
+kubeadm_init_target: "{{ ansible_play_hosts_all | map('extract', hostvars) | selectattr('k8s', 'defined') | selectattr('k8s.master', 'defined') | selectattr('k8s.master') | map(attribute='inventory_hostname') | list | sort | first }}"
diff --git a/roles/longhorn/defaults/main.yml b/roles/longhorn/defaults/main.yml
index 78e2f9a..b0771d7 100644
--- a/roles/longhorn/defaults/main.yml
+++ b/roles/longhorn/defaults/main.yml
@@ -1,3 +1,3 @@
----
# defaults file for longhorn
-version: v1.4.1
+---
+longhorn_version: v1.4.1
diff --git a/roles/longhorn/tasks/main.yml b/roles/longhorn/tasks/main.yml
index 2ed80a8..9e80a8f 100644
--- a/roles/longhorn/tasks/main.yml
+++ b/roles/longhorn/tasks/main.yml
@@ -2,7 +2,7 @@
---
- name: Install required packages
ansible.builtin.package:
- name: "{{ packages[ansible_os_family] }}"
+ name: "{{ longhorn_packages[ansible_os_family] }}"
state: present
- name: Enable iscsid unit
ansible.builtin.systemd:
@@ -16,7 +16,7 @@
register: tmpfile
- name: Get longhorn manifest
ansible.builtin.get_url:
- url: "{{ base_url }}/{{ version }}/deploy/longhorn.yaml"
+ url: "{{ longhorn_base_url }}/{{ longhorn_version }}/deploy/longhorn.yaml"
dest: "{{ tmpfile.path }}"
mode: '0640'
when: tmpfile.path is defined
@@ -52,7 +52,7 @@
definition:
spec:
type: LoadBalancer
- when: enable_frontend_loadbalancer | default(False)
+ when: longhorn_enable_frontend_loadbalancer | default(False)
delegate_to: 127.0.0.1
become: false
run_once: true
diff --git a/roles/longhorn/vars/main.yml b/roles/longhorn/vars/main.yml
index 3d64fea..597ee93 100644
--- a/roles/longhorn/vars/main.yml
+++ b/roles/longhorn/vars/main.yml
@@ -1,10 +1,10 @@
---
-base_url: https://raw.githubusercontent.com/longhorn/longhorn
-packages:
+longhorn_base_url: https://raw.githubusercontent.com/longhorn/longhorn
+longhorn_packages:
Debian:
- open-iscsi
- nfs-common
RedHat:
- iscsi-initiator-utils
- nfs-utils
-enable_frontend_loadbalancer: true
+longhorn_enable_frontend_loadbalancer: true
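
The longhorn role follows the same prefix convention, so the manifest version and the optional LoadBalancer frontend are now selected through longhorn_* variables, for example (values illustrative):

    longhorn_version: v1.4.1
    longhorn_enable_frontend_loadbalancer: false
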
diff --git a/roles/networking/.yamllint b/roles/networking/.yamllint
deleted file mode 100644
index 8827676..0000000
--- a/roles/networking/.yamllint
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# Based on ansible-lint config
-extends: default
-
-rules:
- braces:
- max-spaces-inside: 1
- level: error
- brackets:
- max-spaces-inside: 1
- level: error
- colons:
- max-spaces-after: -1
- level: error
- commas:
- max-spaces-after: -1
- level: error
- comments: disable
- comments-indentation: disable
- document-start: disable
- empty-lines:
- max: 3
- level: error
- hyphens:
- level: error
- indentation: disable
- key-duplicates: enable
- line-length: disable
- new-line-at-end-of-file: disable
- new-lines:
- type: unix
- trailing-spaces: disable
- truthy: disable
diff --git a/roles/networking/defaults/main.yml b/roles/networking/defaults/main.yml
index 26cf3df..9228f37 100644
--- a/roles/networking/defaults/main.yml
+++ b/roles/networking/defaults/main.yml
@@ -1,3 +1,3 @@
---
-link_device: bond0
-vlans: {}
+networking_link_device: bond0
+networking_vlans: {}
diff --git a/roles/networking/handlers/main.yml b/roles/networking/handlers/main.yml
index 203530c..971e9dc 100644
--- a/roles/networking/handlers/main.yml
+++ b/roles/networking/handlers/main.yml
@@ -9,7 +9,7 @@
register: apply_netplan
changed_when: apply_netplan.rc == 0
- name: Update con
- ansible.builtin.command: nmcli con up {{ link_device }}.{{ item }}
+ ansible.builtin.command: nmcli con up {{ networking_link_device }}.{{ item }}
register: con_up
changed_when: con_up.rc == 0
- loop: "{{ (vlans | default({})).keys() | list }}"
+ loop: "{{ (networking_vlans | default({})).keys() | list }}"
diff --git a/roles/networking/tasks/main.yml b/roles/networking/tasks/main.yml
index da2caac..e17d8a3 100644
--- a/roles/networking/tasks/main.yml
+++ b/roles/networking/tasks/main.yml
@@ -1,5 +1,4 @@
# Setup static vlan configurations post-deployment
-# yamllint disable rule:line-length
---
- name: Configure VLANs on Ubuntu
ansible.builtin.template:
@@ -9,26 +8,27 @@
group: root
mode: '0640'
when: ansible_distribution == "Ubuntu"
- loop: "{{ vlans | dict2items }}"
+ loop: "{{ networking_vlans | dict2items }}"
notify:
- Validate netplan
- name: Create VLAN connections on CentOS/RHEL
ansible.builtin.command: >-
- nmcli connection add type vlan ifname {{ link_device }}.{{ item }}
- con-name {{ link_device }}.{{ item }}
- vlan.parent {{ link_device }}
+ nmcli connection add type vlan ifname {{ networking_link_device }}.{{ item }}
+ con-name {{ networking_link_device }}.{{ item }}
+ vlan.parent {{ networking_link_device }}
vlan.id {{ item }}
ipv4.method disabled
ipv6.method disabled
register: add_vlan
- when: ansible_os_family == "RedHat" and ansible_facts['%s.%d'|format(link_device, item)] is undefined
- loop: "{{ vlans.keys() | list }}"
+ when: ansible_os_family == "RedHat" and ansible_facts['%s.%d'|format(networking_link_device, item)] is undefined
+ loop: "{{ networking_vlans.keys() | list }}"
changed_when: add_vlan.rc == 0
notify:
- Update con
- name: Configure VLAN connections on CentOS/RHEL
+ # yamllint disable rule:line-length
ansible.builtin.command: >-
- nmcli connection modify {{ link_device }}.{{ item.key }}{% if item.value['addresses'] | default([]) | ansible.utils.ipv4('host/prefix') | length > 0 %}
+ nmcli connection modify {{ networking_link_device }}.{{ item.key }}{% if item.value['addresses'] | default([]) | ansible.utils.ipv4('host/prefix') | length > 0 %}
ipv4.method manual
ipv4.never-default true
ipv4.addresses "{{ item.value['addresses'] | default([]) | ansible.utils.ipv4('host/prefix') | join(',') }}"{% if item.value['nameservers']['addresses'] | default([]) | ansible.utils.ipv4 | length > 0 %}
@@ -41,9 +41,10 @@
ipv6.dns "{{ item.value['nameservers']['addresses'] | default([]) | ansible.utils.ipv6 | join(' ') }}"{% endif %}{% if item.value['routes'] | default([]) | selectattr('to', 'ansible.utils.ipv6') | length > 0 %}
ipv6.routes "{% for route in item.value['routes'] | default([]) | selectattr('to', 'ansible.utils.ipv6') %}{% if loop.index > 1 %},{% endif %}{{ route['to'] }} {{ route['via'] }}{% if route['metric'] is defined %} {{ route['metric'] }}{% endif %}{% if route['from'] is defined %} src={{ route['from'] }}{% endif %}{% if route['table'] is defined %} table={{ route['table'] }}{% endif %}{% endfor %}"{% endif %}{% if item.value['routing-policy'] | default([]) | selectattr('from', 'ansible.utils.ipv6') | length > 0 %}
ipv6.routing-rules "{% for rule in item.value['routing-policy'] | default([]) | selectattr('from', 'ansible.utils.ipv6') %}{% if loop.index > 1 %},{% endif %}{% if rule['priority'] %}priority {{ rule['priority'] }} {% endif %}from {{ rule['from'] }}{% if rule['table'] is defined %} table {{ rule['table'] }}{% endif %}{% endfor %}"{% endif %}{% endif %}
+ # yamllint enable rule:line-length
register: modify_vlan
when: ansible_os_family == "RedHat"
- loop: "{{ vlans | dict2items }}"
+ loop: "{{ networking_vlans | dict2items }}"
changed_when: modify_vlan.rc == 0
notify:
- Update con
diff --git a/roles/networking/templates/netplan-vlan.yaml.j2 b/roles/networking/templates/netplan-vlan.yaml.j2
index 0ff0c11..33a558f 100644
--- a/roles/networking/templates/netplan-vlan.yaml.j2
+++ b/roles/networking/templates/netplan-vlan.yaml.j2
@@ -3,9 +3,9 @@
network:
version: 2
vlans:
- {{ link_device }}.{{ item.key }}:
+ {{ networking_link_device }}.{{ item.key }}:
dhcp4: no
dhcp6: no
id: {{ item.key }}
- link: {{ link_device }}
+ link: {{ networking_link_device }}
{{ item.value | to_nice_yaml | indent(6) }}
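
The renamed networking_vlans mapping keeps the netplan VLAN schema: each key is a VLAN id whose value is rendered verbatim on Ubuntu, while the nmcli tasks read the addresses, nameservers, routes and routing-policy entries from the same structure. A minimal sketch (ids and addresses are illustrative, not taken from the patch):

    networking_link_device: bond0
    networking_vlans:
      100:
        addresses:
          - 172.16.100.10/24
        nameservers:
          addresses:
            - 172.16.100.1
        routes:
          - to: 172.16.200.0/24
            via: 172.16.100.1
            metric: 200
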
diff --git a/roles/pre-kube/README.md b/roles/pre_kube/README.md
similarity index 100%
rename from roles/pre-kube/README.md
rename to roles/pre_kube/README.md
diff --git a/roles/pre-kube/defaults/main.yml b/roles/pre_kube/defaults/main.yml
similarity index 100%
rename from roles/pre-kube/defaults/main.yml
rename to roles/pre_kube/defaults/main.yml
diff --git a/roles/pre-kube/handlers/main.yml b/roles/pre_kube/handlers/main.yml
similarity index 84%
rename from roles/pre-kube/handlers/main.yml
rename to roles/pre_kube/handlers/main.yml
index ef3b8b4..f630b5e 100644
--- a/roles/pre-kube/handlers/main.yml
+++ b/roles/pre_kube/handlers/main.yml
@@ -1,5 +1,5 @@
---
-- name: reboot
+- name: Reboot
ansible.builtin.reboot:
post_reboot_delay: 120
reboot_timeout: 300
diff --git a/roles/pre-kube/meta/main.yml b/roles/pre_kube/meta/main.yml
similarity index 100%
rename from roles/pre-kube/meta/main.yml
rename to roles/pre_kube/meta/main.yml
diff --git a/roles/pre-kube/tasks/main.yml b/roles/pre_kube/tasks/main.yml
similarity index 68%
rename from roles/pre-kube/tasks/main.yml
rename to roles/pre_kube/tasks/main.yml
index 0026292..2e1460f 100644
--- a/roles/pre-kube/tasks/main.yml
+++ b/roles/pre_kube/tasks/main.yml
@@ -1,10 +1,4 @@
---
-# - name: Prepare Debian/Ubuntu hosts
-# ansible.builtin.import_tasks: debian.yml
-# when: ansible_os_family == "Debian"
-# - name: Prepare CentOS/RHEL hosts
-# ansible.builtin.import_tasks: rhel.yml
-# when: ansible_os_family == "RedHat"
- name: Configure persistent modules for Kubernetes
ansible.builtin.template:
src: templates/modules-load.conf.j2
@@ -16,7 +10,7 @@
community.general.modprobe:
name: "{{ item }}"
state: present
- loop: "{{ required_modules }}"
+ loop: "{{ pre_kube_required_modules }}"
- name: Set persistent sysctl params for Kubernetes
ansible.posix.sysctl:
name: "{{ item['param'] }}"
@@ -25,8 +19,8 @@
sysctl_set: true
reload: true
state: present
- loop: "{{ sysctls }}"
-- name: Disable swap
+ loop: "{{ pre_kube_sysctls }}"
+- name: Disable swap # noqa no-changed-when # The role should always ensure swap is disabled
ansible.builtin.command: swapoff -a
- name: Remove swapfs
ansible.posix.mount:
@@ -36,17 +30,17 @@
- name: Disable SELinux
ansible.posix.selinux:
state: disabled
- ignore_errors: true
+ ignore_errors: true # noqa ignore-errors # SELinux may or may not be present depending on the distro, and the tools to manage it can be missing
notify:
- - reboot
+ - Reboot
- name: Disable ufw, if installed
community.general.ufw:
state: disabled
- ignore_errors: true
+ ignore_errors: true # noqa ignore-errors # ufw is not consistently installed
- name: Disable firewalld, if present
ansible.builtin.systemd:
name: firewalld
enabled: false
masked: true
state: stopped
- ignore_errors: true
+ ignore_errors: true # noqa ignore-errors # firewalld is not consistently installed
diff --git a/roles/pre-kube/templates/modules-load.conf.j2 b/roles/pre_kube/templates/modules-load.conf.j2
similarity index 70%
rename from roles/pre-kube/templates/modules-load.conf.j2
rename to roles/pre_kube/templates/modules-load.conf.j2
index c19c12e..517fb21 100644
--- a/roles/pre-kube/templates/modules-load.conf.j2
+++ b/roles/pre_kube/templates/modules-load.conf.j2
@@ -1,5 +1,5 @@
# Persistent module configuration for kubernetes
{{ ansible_managed | comment }}
-{% for mod in required_modules %}
+{% for mod in pre_kube_required_modules %}
{{ mod }}
{% endfor %}
diff --git a/roles/pre-kube/tests/inventory b/roles/pre_kube/tests/inventory
similarity index 100%
rename from roles/pre-kube/tests/inventory
rename to roles/pre_kube/tests/inventory
diff --git a/roles/pre-kube/tests/test.yml b/roles/pre_kube/tests/test.yml
similarity index 100%
rename from roles/pre-kube/tests/test.yml
rename to roles/pre_kube/tests/test.yml
diff --git a/roles/pre-kube/vars/main.yml b/roles/pre_kube/vars/main.yml
similarity index 86%
rename from roles/pre-kube/vars/main.yml
rename to roles/pre_kube/vars/main.yml
index e99afc5..09acf1b 100644
--- a/roles/pre-kube/vars/main.yml
+++ b/roles/pre_kube/vars/main.yml
@@ -1,7 +1,7 @@
---
-required_modules:
+pre_kube_required_modules:
- br_netfilter
-sysctls:
+pre_kube_sysctls:
- param: net.bridge.bridge-nf-call-iptables
value: 1
- param: net.bridge.bridge-nf-call-ip6tables
diff --git a/roles/shared/defaults/main.yml b/roles/shared/defaults/main.yml
index 3318a5e..453c325 100644
--- a/roles/shared/defaults/main.yml
+++ b/roles/shared/defaults/main.yml
@@ -1,6 +1,6 @@
---
# List of acceptable CIDRs for NFS export
-nfs_export_shared_cidrs:
+shared_nfs_export_shared_cidrs:
- 172.16.0.0/23
- - 172.16.10.0.0/23
+ - 172.16.10.0/23
- 172.16.96.0/20
@@ -11,7 +11,7 @@ nfs_export_shared_cidrs:
# List of MAC addresses that will be allowed to autoprovision via TFTP.
# This list must match the MAC address of the first NIC (eno1) in each physical
# server.
-autoprovision_macs:
+shared_autoprovision_macs:
- 01-14-fe-b5-c9-73-12
- 01-d4-ae-52-94-b1-a6
- 01-d4-ae-52-98-b7-a8
diff --git a/roles/shared/tasks/main.yml b/roles/shared/tasks/main.yml
index 73ac281..7227b5c 100644
--- a/roles/shared/tasks/main.yml
+++ b/roles/shared/tasks/main.yml
@@ -1,5 +1,4 @@
# Update shared server
-# yamllint disable rule:line-length
---
- name: Install required packages
ansible.builtin.package:
@@ -127,6 +126,7 @@
- name: Copy Debian netboot files for TFTP
ansible.builtin.copy:
remote_src: true
+ # yamllint disable-line rule:line-length
src: "/usr/lib/debian-installer/images/{{ ansible_distribution_major_version }}/amd64/text/debian-installer/amd64/{{ item }}"
dest: "/srv/tftp/boot/debian{{ ansible_distribution_major_version }}-x86_64/{{ item }}"
force: true
@@ -240,7 +240,7 @@
owner: tftp
group: root
mode: '0644'
- loop: "{{ autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
+ loop: "{{ shared_autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
tags:
- pxe
- name: Create hard-links for server MACs to autoprovision via EFI32
@@ -252,7 +252,7 @@
owner: tftp
group: root
mode: '0644'
- loop: "{{ autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
+ loop: "{{ shared_autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
tags:
- pxe
- name: Create hard-links for server MACs to autoprovision via EFI64
@@ -264,7 +264,7 @@
owner: tftp
group: root
mode: '0644'
- loop: "{{ autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
+ loop: "{{ shared_autoprovision_macs | map('regex_replace', ':', '-') | map('lower') | list }}"
tags:
- pxe
- name: Start tftpd-hpa
@@ -276,7 +276,7 @@
- pxe
- name: Update CA cert
ansible.builtin.copy:
- src: "{{ ca_cert_pem }}"
+ src: "{{ shared_ca_cert_pem }}"
dest: "{{ item }}/acceleratedgcp-root-ca.crt"
force: true
owner: root
@@ -348,7 +348,7 @@
- http
- name: Copy CA file
ansible.builtin.copy:
- src: "{{ ca_cert_pem }}"
+ src: "{{ shared_ca_cert_pem }}"
dest: /srv/shared/certs/
force: true
owner: root
diff --git a/roles/shared/templates/etc/exports.j2 b/roles/shared/templates/etc/exports.j2
index 5c1fdd1..8691778 100644
--- a/roles/shared/templates/etc/exports.j2
+++ b/roles/shared/templates/etc/exports.j2
@@ -1,3 +1,3 @@
# NFS exports
{{ ansible_managed | comment }}
-/srv/shared{% for cidr in nfs_export_shared_cidrs %} {{ cidr }}(ro,all_squash,no_subtree_check){% endfor %}
+/srv/shared{% for cidr in shared_nfs_export_shared_cidrs %} {{ cidr }}(ro,all_squash,no_subtree_check){% endfor %}
diff --git a/roles/shared/vars/main.yml b/roles/shared/vars/main.yml
index 606bd27..94d5762 100644
--- a/roles/shared/vars/main.yml
+++ b/roles/shared/vars/main.yml
@@ -1,6 +1,6 @@
---
# Path to externally generated root CA certificate
-ca_cert_pem: ./ca/acceleratedgcp-root-ca.pem
+shared_ca_cert_pem: ./ca/acceleratedgcp-root-ca.pem
# Path to externally generated shared server TLS certificate and key
shared_tls_cert_pem: ./certs/{{ lab_shared_host }}.chain.pem
shared_tls_key_pem: ./certs/{{ lab_shared_host }}.key.pem
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index c99f26f..b8f83a1 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -1,9 +1,9 @@
---
-hashi_gpg_key: files/hashicorp-archive-keyring.gpg
+vault_hashi_gpg_key: files/hashicorp-archive-keyring.gpg
vault_port: 8200
vault_host: "vault.{{ lab_domain }}"
vault_url: "https://{{ vault_host }}:{{ vault_port }}"
-vault_cluster_url: "https://{{ vault_host }}:{{ vault_port + 1}}"
+vault_cluster_url: "https://{{ vault_host }}:{{ vault_port + 1 }}"
vault_home: /opt/vault
vault_gcs: 18ad5ebfd63af0e4
vault_tls_disable: false
diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml
index 257311c..b457635 100644
--- a/roles/vault/tasks/main.yml
+++ b/roles/vault/tasks/main.yml
@@ -1,9 +1,8 @@
# Manage Vault service
-# yamllint disable rule:line-length
---
- name: Copy static hashicorp key
ansible.builtin.copy:
- src: "{{ hashi_gpg_key }}"
+ src: "{{ vault_hashi_gpg_key }}"
dest: /usr/share/keyrings/hashicorp-archive-keyring.gpg
force: true
owner: root
@@ -15,6 +14,7 @@
- bootstrap
- name: Add Hashicorp apt repo
ansible.builtin.apt_repository:
+ # yamllint disable-line rule:line-length
repo: "deb [arch=amd64 signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main"
state: present
filename: hashicorp
diff --git a/shared.yml b/shared.yml
index 57ff8e5..886a00e 100644
--- a/shared.yml
+++ b/shared.yml
@@ -1,15 +1,14 @@
# Update shared server playbook
# In my lab, the shared group hosts both shared file services and vault services
# code: language=ansible
-# yamllint disable rule:truthy rule:line-length
---
- name: Update shared services and files
hosts: shared
- become: yes
+ become: true
roles:
- shared
- name: Update Vault service
hosts: vault
- become: yes
+ become: true
roles:
- vault
diff --git a/un-kubernetes.yml b/un-kubernetes.yml
index 8dc63e7..c26bfce 100644
--- a/un-kubernetes.yml
+++ b/un-kubernetes.yml
@@ -4,12 +4,11 @@
hosts: servers
become: true
gather_facts: true
-
tasks:
- name: Get installed packages
ansible.builtin.package_facts:
manager: auto
- - name: Uninstall cilium
+ - name: Uninstall cilium # noqa ignore-errors
kubernetes.core.helm:
release_name: cilium
release_namespace: kube-system
@@ -20,7 +19,7 @@
become: false
run_once: true
ignore_errors: true
- - name: Reset kubeadm
+ - name: Reset kubeadm # noqa no-changed-when
ansible.builtin.command: kubeadm reset --force
when: "'kubeadm' in ansible_facts.packages"
- name: Disable kubelet unit
@@ -30,16 +29,16 @@
state: stopped
daemon_reload: true
when: "'kubelet' in ansible_facts.packages"
- - name: Remove all pods
- ansible.builtin.command: crictl rmp --all --force
+ - name: Remove all pods # noqa ignore-errors
+ ansible.builtin.command: crictl rmp --all --force # noqa no-changed-when
ignore_errors: true
when: "'cri-tools' in ansible_facts.packages"
- - name: Remove all containers
- ansible.builtin.command: crictl rm --all
+ - name: Remove all containers # noqa ignore-errors
+ ansible.builtin.command: crictl rm --all # noqa no-changed-when
ignore_errors: true
when: "'cri-tools' in ansible_facts.packages"
- - name: Remove all images
- ansible.builtin.command: crictl rmi --all
+ - name: Remove all images # noqa ignore-errors
+ ansible.builtin.command: crictl rmi --all # noqa no-changed-when
ignore_errors: true
when: "'cri-tools' in ansible_facts.packages"
- name: Disable CRI-O
diff --git a/vault/README.md b/vault/README.md
index 4fc82ae..0a7f445 100644
--- a/vault/README.md
+++ b/vault/README.md
@@ -134,7 +134,7 @@ Unseal token (leave empty when done):
Vault should now be running with TLS certs generated by Vault itself; at this point it can be unsealed, and used with OIDC tokens.
-
+
## Requirements
| Name | Version |
@@ -197,5 +197,5 @@ No modules.
## Outputs
No outputs.
-
+
diff --git a/vault/root-otp-login.sh b/vault/root-otp-login.sh
index 51deb64..f9f4632 100755
--- a/vault/root-otp-login.sh
+++ b/vault/root-otp-login.sh
@@ -12,13 +12,14 @@ JSON="$(vault operator generate-root -init -format=json)"
NONCE="$(echo "${JSON}" | jq -r '.nonce')"
OTP="$(echo "${JSON}" | jq -r '.otp')"
while true; do
- read -s -p "Unseal token (leave empty when done): " token
+ # shellcheck disable=SC3045
+ read -r -s -p "Unseal token (leave empty when done): " token
echo
test -z "${token}" && break
- set -- "$@" ${token}
+ set -- "$@" "${token}"
done
for KEY in "$@"; do
- ENC_TOKEN="$(vault operator generate-root -format=json -nonce ${NONCE} ${KEY} | jq -r '.encoded_token')"
+ ENC_TOKEN="$(vault operator generate-root -format=json -nonce "${NONCE}" "${KEY}" | jq -r '.encoded_token')"
done
vault login "$(vault operator generate-root -decode "${ENC_TOKEN}" -otp "${OTP}")"
unset JSON NONCE OTP ENC_TOKEN KEY