diff --git a/Tests/config.toml b/Tests/config.toml index 6b7a5c71f..beafe1756 100644 --- a/Tests/config.toml +++ b/Tests/config.toml @@ -42,6 +42,10 @@ subjects = [ "kind-current", "kind-current-1", "kind-current-2", + #"moin-cluster-current", + #"moin-cluster-current-1", + "moin-cluster-current-2", + ] workers = 1 # better restrict this with clusters running on local machine @@ -82,3 +86,21 @@ kube_plugin = "kind" kube_plugin_config = "kaas/kind_config.yaml" clusterspec_cluster = "current-k8s-release-2" + +[subjects.moin-cluster-current.kubernetes_setup] +kube_plugin = "cluster-stacks-api" +kube_plugin_config = "../playbooks/k8s_configs/moin_cluster_config.yaml" +clusterspec_cluster = "current-k8s-release" + + +[subjects.moin-cluster-current-1.kubernetes_setup] +kube_plugin = "cluster-stacks-api" +kube_plugin_config = "../playbooks/k8s_configs/moin_cluster_config.yaml" +clusterspec_cluster = "current-k8s-release-1" + + +[subjects.moin-cluster-current-2.kubernetes_setup] +kube_plugin = "cluster-stacks-api" +kube_plugin_config = "../playbooks/k8s_configs/moin_cluster_config.yaml" +clusterspec_cluster = "current-k8s-release-2" + diff --git a/Tests/kaas/README.md b/Tests/kaas/README.md index 6a9737dda..9640d8ff4 100644 --- a/Tests/kaas/README.md +++ b/Tests/kaas/README.md @@ -48,11 +48,6 @@ In addition to the core requirements, ensure the following are set up: * An **OpenStack** environment configured and accessible. * A `clouds.yaml` file defining OpenStack credentials and endpoints. 
import os
import yaml
import subprocess
import time
import logging
from interface import KubernetesClusterPlugin

logger = logging.getLogger("PluginClusterStacks")


# Default configuration values
DEFAULTS = {
    'cs_name': 'scs',
}

# Configuration keys that get exported (upper-cased) as environment variables
# for the kubectl/clusterctl/envsubst subprocess calls issued by this plugin.
ENV_KEYS = {'cs_name', 'cs_version', 'cs_channel', 'cs_secretname', 'cs_class_name',
            'cs_namespace', 'cs_pod_cidr', 'cs_service_cidr', 'cs_external_id', 'cs_k8s_patch_version',
            'cs_cluster_name', 'cs_k8s_version'}


# Helper functions
def wait_for_pods(self, namespaces, timeout=240, interval=15, kubeconfig=None):
    """
    Waits for all pods in specified namespaces to reach the condition 'Ready'.

    :param namespaces: List of namespaces to check for pod readiness.
    :param timeout: Total time to wait in seconds before giving up.
    :param interval: Time to wait between checks in seconds.
    :param kubeconfig: Optional path to the kubeconfig file for the target Kubernetes cluster.
    :return: True if all pods are ready within the given timeout, raises TimeoutError otherwise.
    """
    start_time = time.time()

    while time.time() - start_time < timeout:
        all_pods_ready = True

        for namespace in namespaces:
            try:
                # Bound the inner `kubectl wait` by `interval`, not `timeout`:
                # a full-length inner wait would block the first iteration of
                # this polling loop for the entire timeout budget.
                wait_pods_command = f"kubectl wait -n {namespace} --for=condition=Ready --timeout={interval}s pod --all"
                result = self._run_subprocess(
                    wait_pods_command,
                    f"Error fetching pods in {namespace}",
                    shell=True,
                    capture_output=True,
                    text=True,
                    kubeconfig=kubeconfig
                )

                if result.returncode != 0:
                    logger.warning(
                        f"Not all pods in namespace {namespace} are ready. Details: {result.stderr}"
                    )
                    all_pods_ready = False
                else:
                    logger.info(f"All pods in namespace {namespace} are ready.")

            except subprocess.CalledProcessError as error:
                # _run_subprocess runs with check=True, so a non-zero exit
                # normally surfaces here rather than via result.returncode.
                logger.error(f"Error checking pods in {namespace}: {error}")
                all_pods_ready = False

        if all_pods_ready:
            logger.info("All specified pods are ready in all namespaces.")
            return True

        logger.info("Waiting for all pods in specified namespaces to become ready...")
        time.sleep(interval)

    raise TimeoutError(
        f"Timed out after {timeout} seconds waiting for pods in namespaces {namespaces} to become ready."
    )


def load_config(config_path):
    """
    Loads the plugin configuration from a YAML file.

    Relative paths in the 'kubeconfig', 'workloadcluster' and 'clusterstack'
    entries are resolved against the directory containing the config file.
    """
    with open(config_path, "r") as file:
        config = yaml.safe_load(file) or {}

    base_dir = os.path.dirname(config_path)
    for path_key in ('kubeconfig', 'workloadcluster', 'clusterstack'):
        if path_key in config:
            config[path_key] = os.path.join(base_dir, config[path_key])

    return config


def setup_environment_variables(self):
    """
    Constructs and returns a dictionary of required environment variables
    based on the configuration.

    :return: A dictionary of required environment variables (upper-cased
        ENV_KEYS entries) for Kubernetes-related tooling.
    """
    # Values that are only known once create_cluster() has been called
    if hasattr(self, 'cluster_version'):
        self.config['cs_k8s_version'] = self.cluster_version
        self.config['cs_namespace'] = self.cs_namespace
        self.config['cs_class_name'] = (
            f"openstack-{self.config['cs_name']}-{str(self.config['cs_k8s_version']).replace('.', '-')}-"
            f"{self.config['cs_version']}"
        )
    if hasattr(self, 'cluster_name'):
        self.config['cs_cluster_name'] = self.cluster_name

    # Export only whitelisted keys, upper-cased, as environment variables
    required_env = {key.upper(): value for key, value in self.config.items() if key in ENV_KEYS}

    return required_env


class PluginClusterStacksRemoteAPI(KubernetesClusterPlugin):
    """Cluster-stacks plugin that provisions workload clusters through a
    remote Cluster-API management cluster (reached via a pre-existing
    kubeconfig) instead of a locally bootstrapped one."""

    def __init__(self, config_file):
        self.config = load_config(config_file) if config_file else {}
        logger.debug(self.config)
        self.working_directory = os.getcwd()
        logger.debug(f"Working from {self.working_directory}")
        for key, value in DEFAULTS.items():
            self.config.setdefault(key, value)
        self.kubeconfig_mgmnt = self.config["kubeconfig"]
        self.workloadclusters = self.config["workloadcluster"]
        self.clusterstack = self.config["clusterstack"]
        self.cs_namespace = self.config["namespace"]

    def create_cluster(self, cluster_name, version, kubeconfig_filepath):
        """
        Creates a workload cluster via the remote management cluster and
        writes its kubeconfig to `kubeconfig_filepath`.

        :param cluster_name: Name for the new workload cluster.
        :param version: Kubernetes minor version (e.g. "1.29") for the cluster.
        :param kubeconfig_filepath: Where to store the workload cluster kubeconfig.
        """
        self.cluster_name = cluster_name
        self.cluster_version = version
        self.kubeconfig_cs_cluster = kubeconfig_filepath

        # Create cluster-stack resource
        self._apply_yaml(self.clusterstack, "Error applying clusterstack.yaml", kubeconfig=self.kubeconfig_mgmnt)

        # Wait for cluster-stack resource to be ready
        self._wait_for_clusterstack_ready(namespace=self.cs_namespace, timeout=600)

        # Create workload cluster
        self._apply_yaml(
            self.workloadclusters,
            "Error applying cluster.yaml",
            kubeconfig=self.kubeconfig_mgmnt,
        )

        # Get and wait on kubeadmcontrolplane and retrieve workload cluster kubeconfig
        kcp_name = self._get_kubeadm_control_plane_name(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)
        self._wait_kcp_ready(kcp_name, namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)
        self._retrieve_kubeconfig(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)

        # Wait for workload system pods to be ready
        wait_for_pods(self, ["kube-system"], timeout=600, interval=15, kubeconfig=self.kubeconfig_cs_cluster)

    def delete_cluster(self, cluster_name):
        """
        Deletes the named workload cluster via the management cluster.
        Missing clusters are logged and skipped rather than raising.
        """
        try:
            # Check if the cluster exists
            check_cluster_command = f"kubectl get cluster {cluster_name} -n {self.cs_namespace}"
            result = self._run_subprocess(
                check_cluster_command,
                "Failed to get cluster resource",
                shell=True,
                capture_output=True,
                text=True,
                kubeconfig=self.kubeconfig_mgmnt
            )

            # Proceed with deletion only if the cluster exists
            if result.returncode == 0:
                delete_command = f"kubectl delete cluster {cluster_name} --timeout=600s -n {self.cs_namespace}"
                self._run_subprocess(
                    delete_command, "Timeout while deleting the cluster", shell=True, kubeconfig=self.kubeconfig_mgmnt
                )

        except subprocess.CalledProcessError as error:
            # stderr is None unless the failing call captured output, so guard
            # before the substring test to avoid a TypeError masking the error.
            if error.stderr and "NotFound" in error.stderr:
                logger.info(
                    f"Cluster {cluster_name} not found. Skipping deletion."
                )
            else:
                raise RuntimeError(f"Error checking for cluster existence: {error}")

    def _apply_yaml(self, yaml_file, error_msg, kubeconfig=None):
        """
        Applies a Kubernetes YAML configuration file to the cluster, substituting environment variables as needed.

        :param yaml_file: Path of the local YAML file to apply.
        :param error_msg: Message to log/raise if the apply fails.
        :param kubeconfig: Optional path to a kubeconfig file, which specifies which Kubernetes cluster
            to apply the YAML configuration to.
        :raises ValueError: If `yaml_file` does not exist on disk.
        :raises RuntimeError: If the kubectl apply fails.
        """
        try:
            # Only local files are supported; env vars are expanded by envsubst
            if os.path.isfile(yaml_file):
                command = f"envsubst < {yaml_file} | kubectl apply -f -"
            else:
                raise ValueError(f"Unknown file: {yaml_file}")

            self._run_subprocess(command, error_msg, shell=True, kubeconfig=kubeconfig)

        except subprocess.CalledProcessError as error:
            raise RuntimeError(f"{error_msg}: {error}")

    def _wait_for_clusterstack_ready(self, namespace, timeout=600):
        """
        Waits for the clusterstack resource in the management cluster to reach the condition 'Ready'.

        :param namespace: The namespace to search for the clusterstack resource.
        :param timeout: The maximum time to wait in seconds.
        :raises RuntimeError: If the clusterstack resource does not become ready within the timeout.
        """
        try:
            command = f"kubectl wait clusterstack/clusterstack -n {namespace} --for=condition=Ready --timeout={timeout}s"
            self._run_subprocess(
                command,
                "Error waiting for clusterstack to be ready",
                shell=True,
                kubeconfig=self.kubeconfig_mgmnt
            )
            logger.info("Clusterstack is ready.")
        except subprocess.CalledProcessError as error:
            raise RuntimeError(f"Clusterstack did not become ready within {timeout} seconds: {error}")

    def _get_kubeadm_control_plane_name(self, namespace="default", kubeconfig=None):
        """
        Retrieves the name of the KubeadmControlPlane resource for the Kubernetes cluster
        in the specified namespace, retrying while the resource is still being created.

        :param namespace: The namespace to search for the KubeadmControlPlane resource.
        :param kubeconfig: Optional path to the kubeconfig file for the target Kubernetes cluster.
        :return: The name of the KubeadmControlPlane resource as a string.
        :raises RuntimeError: If no name could be retrieved after all retries.
        """
        max_retries = 6
        delay_between_retries = 20
        for _ in range(max_retries):
            try:
                kcp_command = (
                    f"kubectl get kubeadmcontrolplane -n {namespace} "
                    "-o=jsonpath='{.items[0].metadata.name}'"
                )
                kcp_result = self._run_subprocess(
                    kcp_command, "Error retrieving kcp_name", shell=True,
                    capture_output=True, text=True, kubeconfig=kubeconfig
                )
                kcp_name_stdout = kcp_result.stdout.strip()
                if kcp_name_stdout:
                    logger.info(f"KubeadmControlPlane name: {kcp_name_stdout}")
                    return kcp_name_stdout
            except subprocess.CalledProcessError as error:
                logger.error(f"Error getting kubeadmcontrolplane name: {error}")
            # Wait before retrying
            time.sleep(delay_between_retries)
        raise RuntimeError("Failed to get kubeadmcontrolplane name")

    def _wait_kcp_ready(self, kcp_name, namespace="default", kubeconfig=None):
        """
        Waits for the specified KubeadmControlPlane resource to become 'Available'.

        :param kcp_name: The name of the KubeadmControlPlane resource to check for availability.
        :param namespace: The namespace where the KubeadmControlPlane resource is.
        :param kubeconfig: Optional path to the kubeconfig file for the target Kubernetes cluster.
        :raises RuntimeError: If the resource does not become available in time.
        """
        try:
            self._run_subprocess(
                f"kubectl wait kubeadmcontrolplane/{kcp_name} --for=condition=Available --timeout=600s -n {namespace}",
                "Error waiting for kubeadmcontrolplane availability",
                shell=True,
                kubeconfig=kubeconfig
            )
        except subprocess.CalledProcessError as error:
            raise RuntimeError(f"Error waiting for kubeadmcontrolplane to be ready: {error}")

    def _retrieve_kubeconfig(self, namespace="default", kubeconfig=None):
        """
        Retrieves the kubeconfig for the workload cluster and saves it to
        the path stored in `self.kubeconfig_cs_cluster`.

        :param namespace: The namespace of the cluster to retrieve the kubeconfig for.
        :param kubeconfig: Optional path to the kubeconfig file for the management cluster.
        """
        kubeconfig_command = (
            f"sudo -E clusterctl get kubeconfig {self.cluster_name} -n {namespace} > {self.kubeconfig_cs_cluster}"
        )
        self._run_subprocess(kubeconfig_command, "Error retrieving kubeconfig", shell=True, kubeconfig=kubeconfig)

    def _run_subprocess(self, command, error_msg, shell=False, capture_output=False, text=False, kubeconfig=None):
        """
        Executes a subprocess command with the plugin's environment variables.

        :param command: The shell command to be executed (string, or list of args when shell=False).
        :param error_msg: A custom error message to be logged if the subprocess fails.
        :param shell: Whether to execute the command through the shell (default: `False`).
        :param capture_output: Whether to capture the command's standard output and standard error (default: `False`).
        :param text: Whether to treat the command's output and error as text (default: `False`).
        :param kubeconfig: Optional path to the kubeconfig file for the target Kubernetes cluster.
        :return: The `subprocess.CompletedProcess` result.
        :raises subprocess.CalledProcessError: If the command exits non-zero (check=True).
        """
        try:
            env = setup_environment_variables(self)
            env['PATH'] = f'/usr/local/bin:/usr/bin:{self.working_directory}'
            # Set env variable DISPLAY which is needed to open the oidc window automatically
            env['DISPLAY'] = ':0'
            env['HOME'] = self.working_directory
            if kubeconfig:
                env['KUBECONFIG'] = kubeconfig

            # Run the subprocess with the environment; check=True raises on failure
            result = subprocess.run(command, shell=shell, capture_output=capture_output, text=text, check=True, env=env)

            return result

        except subprocess.CalledProcessError as error:
            logger.error(f"{error_msg}: {error}")
            raise
idna==3.8 diff --git a/playbooks/k8s_configs/moin_cluster_clusterstack.yaml b/playbooks/k8s_configs/moin_cluster_clusterstack.yaml new file mode 100644 index 000000000..c25833083 --- /dev/null +++ b/playbooks/k8s_configs/moin_cluster_clusterstack.yaml @@ -0,0 +1,30 @@ +# Cluster-stack and OpenStack cluster-stack release resource templates +apiVersion: clusterstack.x-k8s.io/v1alpha1 +kind: ClusterStack +metadata: + name: clusterstack + namespace: ${CS_NAMESPACE} +spec: + provider: openstack + name: ${CS_NAME} + kubernetesVersion: "${CS_K8S_VERSION}" + channel: ${CS_CHANNEL} + autoSubscribe: false + providerRef: + apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 + kind: OpenStackClusterStackReleaseTemplate + name: cspotemplate + versions: + - ${CS_VERSION} +--- +apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1 +kind: OpenStackClusterStackReleaseTemplate +metadata: + name: cspotemplate + namespace: ${CS_NAMESPACE} +spec: + template: + spec: + identityRef: + kind: Secret + name: ${CS_SECRETNAME} diff --git a/playbooks/k8s_configs/moin_cluster_config.yaml b/playbooks/k8s_configs/moin_cluster_config.yaml new file mode 100644 index 000000000..dda27312b --- /dev/null +++ b/playbooks/k8s_configs/moin_cluster_config.yaml @@ -0,0 +1,16 @@ +kubeconfig: "moin_cluster_kubeconfig.yaml" +workloadcluster: "moin_cluster_workloadcluster.yaml" +clusterstack: "moin_cluster_clusterstack.yaml" +namespace: "kaas-playground8" + +# Cluster-stack related configuration +cs_name: "scs" # Cluster Stack Name +cs_version: "v1" # Cluster Stack Version +cs_channel: "stable" # Release channel +cs_secretname: "openstack" # Cloud name from OpenStack clouds.yaml + +# Cluster Information +cs_pod_cidr: "192.168.0.0/16" # Pod CIDR for networking +cs_service_cidr: "10.96.0.0/12" # Service CIDR for networking +cs_external_id: "ebfe5546-f09f-4f42-ab54-094e457d42ec" # External ID for the Cluster Stack +cs_k8s_patch_version: "9" # Kubernetes patch version to use diff --git 
a/playbooks/k8s_configs/moin_cluster_kubeconfig.yaml b/playbooks/k8s_configs/moin_cluster_kubeconfig.yaml new file mode 100644 index 000000000..235fa232c --- /dev/null +++ b/playbooks/k8s_configs/moin_cluster_kubeconfig.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +clusters: +- cluster: + server: https://moin.k8s.scs.community + name: moin-cluster +contexts: +- context: + cluster: moin-cluster + user: oidc + name: moin-cluster +current-context: moin-cluster +kind: Config +users: +- name: oidc + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + args: + - oidc-login + - get-token + - --oidc-issuer-url=https://dex.k8s.scs.community + - --oidc-client-id=kubectl + - --oidc-extra-scope=groups,profile + command: kubectl diff --git a/playbooks/k8s_configs/moin_cluster_workloadcluster.yaml b/playbooks/k8s_configs/moin_cluster_workloadcluster.yaml new file mode 100644 index 000000000..5b4af619b --- /dev/null +++ b/playbooks/k8s_configs/moin_cluster_workloadcluster.yaml @@ -0,0 +1,25 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CS_CLUSTER_NAME} + namespace: ${CS_NAMESPACE} + labels: + managed-secret: cloud-config +spec: + topology: + variables: + - name: controller_flavor + value: "SCS-2V-4-50" + - name: worker_flavor + value: "SCS-2V-4-50" + - name: external_id + value: ${CS_EXTERNAL_ID} + class: ${CS_CLASS_NAME} + controlPlane: + replicas: 3 + version: v${CS_K8S_VERSION}.${CS_K8S_PATCH_VERSION} + workers: + machineDeployments: + - class: default-worker + name: md-0 + replicas: 3