diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index 858dac8a4c..df89cffa49 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -3243,6 +3243,17 @@ spec: - iam-authenticator - aws-cli type: string + upgradePolicy: + description: |- + The cluster upgrade policy to use for the cluster. + (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + enum: + - extended + - standard + type: string version: description: |- Version defines the desired Kubernetes version. If no version number diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml index ad5c56c54b..450fd296b0 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml @@ -1044,6 +1044,17 @@ spec: - iam-authenticator - aws-cli type: string + upgradePolicy: + description: |- + The cluster upgrade policy to use for the cluster. + (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + enum: + - extended + - standard + type: string version: description: |- Version defines the desired Kubernetes version. 
If no version number diff --git a/controlplane/eks/api/v1beta1/conversion.go b/controlplane/eks/api/v1beta1/conversion.go index 0985ef66d5..b563a71324 100644 --- a/controlplane/eks/api/v1beta1/conversion.go +++ b/controlplane/eks/api/v1beta1/conversion.go @@ -122,6 +122,7 @@ func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.RolePermissionsBoundary = restored.Spec.RolePermissionsBoundary dst.Status.Version = restored.Status.Version dst.Spec.BootstrapSelfManagedAddons = restored.Spec.BootstrapSelfManagedAddons + dst.Spec.UpgradePolicy = restored.Spec.UpgradePolicy return nil } diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index 48f326b2dc..95ae9313a6 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -384,6 +384,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl if err := Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil { return err } + // WARNING: in.UpgradePolicy requires manual conversion: does not exist in peer-type return nil } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index 9112863e35..be93930441 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -212,6 +212,15 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // KubeProxy defines managed attributes of the kube-proxy daemonset KubeProxy KubeProxy `json:"kubeProxy,omitempty"` + + // The cluster upgrade policy to use for the cluster. + // (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + // `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + // `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + // If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + // +kubebuilder:validation:Enum=extended;standard + // +optional + UpgradePolicy UpgradePolicy `json:"upgradePolicy,omitempty"` } // KubeProxy specifies how the kube-proxy daemonset is managed. diff --git a/controlplane/eks/api/v1beta2/types.go b/controlplane/eks/api/v1beta2/types.go index 79f58f8e77..60cd4b454d 100644 --- a/controlplane/eks/api/v1beta2/types.go +++ b/controlplane/eks/api/v1beta2/types.go @@ -241,6 +241,24 @@ type AddonIssue struct { ResourceIDs []string `json:"resourceIds,omitempty"` } +// UpgradePolicy defines the support policy to use for the cluster. +type UpgradePolicy string + +var ( + // UpgradePolicyExtended indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. + // You will incur extended support charges with this setting. 
+ // You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + UpgradePolicyExtended = UpgradePolicy("extended") + + // UpgradePolicyStandard indicates that the cluster is eligible for automatic upgrade at the end of standard support. + // You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + UpgradePolicyStandard = UpgradePolicy("standard") +) + +func (e UpgradePolicy) String() string { + return string(e) +} + const ( // SecurityGroupCluster is the security group for communication between EKS // control plane and managed node groups. diff --git a/docs/book/src/topics/eks/creating-a-cluster.md b/docs/book/src/topics/eks/creating-a-cluster.md index 0ef75009c6..7ec523837f 100644 --- a/docs/book/src/topics/eks/creating-a-cluster.md +++ b/docs/book/src/topics/eks/creating-a-cluster.md @@ -14,6 +14,9 @@ clusterctl generate cluster capi-eks-quickstart --flavor eks-managedmachinepool NOTE: When creating an EKS cluster only the **MAJOR.MINOR** of the `-kubernetes-version` is taken into consideration. +By default, CAPA relies on the default EKS cluster upgrade policy, which at the time of writing is `extended` support. +See the AWS documentation on the [cluster upgrade policy](https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) for more information. + ## Kubeconfig When creating an EKS cluster 2 kubeconfigs are generated and stored as secrets in the management cluster. This is different to when you create a non-managed cluster using the AWS provider. diff --git a/pkg/cloud/converters/eks.go b/pkg/cloud/converters/eks.go index fbb35c67c3..56fb464abd 100644 --- a/pkg/cloud/converters/eks.go +++ b/pkg/cloud/converters/eks.go @@ -278,3 +278,11 @@ func AddonConflictResolutionFromSDK(conflict ekstypes.ResolveConflicts) *string } return aws.String(string(ekscontrolplanev1.AddonResolutionOverwrite)) } + +// SupportTypeToSDK converts CAPA upgrade support policy types to SDK types. +func SupportTypeToSDK(input ekscontrolplanev1.UpgradePolicy) ekstypes.SupportType { + if input == ekscontrolplanev1.UpgradePolicyStandard { + return ekstypes.SupportTypeStandard + } + return ekstypes.SupportTypeExtended +} diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index b1b480e0b2..87f68755e6 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -35,6 +36,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp" @@ -147,23 +149,7 @@ func (s *Service) reconcileCluster(ctx context.Context) error { // computeCurrentStatusVersion returns the computed current EKS cluster kubernetes version. // The computation has awareness of the fact that EKS clusters only return a major.minor kubernetes version, // and returns a compatible version for the status according to the one the user specified in the spec.
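The hunks that follow move the semver parsing out of `computeCurrentStatusVersion` and into a new `parseClusterVersionString` helper. A minimal, self-contained sketch of the parsing behavior those comments describe, assuming the `github.com/blang/semver/v4` package sits behind the `semver` alias used in this diff:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4" // assumed import path behind the "semver" alias in the diff
)

func main() {
	// spec.Version is typically MAJOR.MINOR only ("1.16"); ParseTolerant
	// fills in the missing patch component, yielding 1.16.0.
	specSemver, _ := semver.ParseTolerant("1.16")
	fmt.Println(specSemver) // 1.16.0

	// An unset spec.Version, or a cluster version EKS has not reported yet,
	// parses (error ignored) to the zero value, 0.0.0.
	unknown, _ := semver.ParseTolerant("")
	fmt.Println(unknown) // 0.0.0

	// The out-of-standard-support detection added to setStatus compares only
	// major/minor, since EKS never reports a patch version.
	clusterSemver, _ := semver.ParseTolerant("1.17")
	behind := specSemver.Major < clusterSemver.Major ||
		(specSemver.Major == clusterSemver.Major && specSemver.Minor < clusterSemver.Minor)
	fmt.Println(behind) // true
}
```

`ParseTolerant` is what lets a bare "1.16" from `spec.Version` compare cleanly against the MAJOR.MINOR string EKS reports.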
-func computeCurrentStatusVersion(specV *string, clusterV *string) *string { - specVersion := "" - if specV != nil { - specVersion = *specV - } - - clusterVersion := "" - if clusterV != nil { - clusterVersion = *clusterV - } - - // Ignore parsing errors as these are already validated by the kubebuilder validation and the AWS API. - // Also specVersion might not be specified in the spec.Version for AWSManagedControlPlane, this results in a "0.0.0" version. - // Also clusterVersion might not yet be returned by the AWS EKS API, as the cluster might still be initializing, this results in a "0.0.0" version. - specSemverVersion, _ := semver.ParseTolerant(specVersion) - currentSemverVersion, _ := semver.ParseTolerant(clusterVersion) - +func computeCurrentStatusVersion(clusterV *string, specSemverVersion semver.Version, currentSemverVersion semver.Version) *string { // If AWS EKS API is not returning a version, set the status.Version to empty string. if currentSemverVersion.String() == "0.0.0" { return ptr.To("") @@ -187,9 +173,27 @@ func computeCurrentStatusVersion(specV *string, clusterV *string) *string { return clusterV } +// parseClusterVersionString parses a version string into a semver version. +// If the string cannot be parsed as semver, it returns 0.0.0. +func parseClusterVersionString(str *string) semver.Version { + version := "" + if str != nil { + version = *str + } + + // Ignore parsing errors as these are already validated by the kubebuilder validation and the AWS API. + semverVersion, _ := semver.ParseTolerant(version) + return semverVersion +} + func (s *Service) setStatus(cluster *ekstypes.Cluster) error { + // specSemver might not be specified in the spec.Version for AWSManagedControlPlane; this results in a "0.0.0" version. + specSemver := parseClusterVersionString(s.scope.ControlPlane.Spec.Version) + // clusterSemver might not yet be returned by the AWS EKS API, as the cluster might still be initializing; this results in a "0.0.0" version. + clusterSemver := parseClusterVersionString(cluster.Version) + // Set the current Kubernetes control plane version in the status. - s.scope.ControlPlane.Status.Version = computeCurrentStatusVersion(s.scope.ControlPlane.Spec.Version, cluster.Version) + s.scope.ControlPlane.Status.Version = computeCurrentStatusVersion(cluster.Version, specSemver, clusterSemver) // Set the current cluster status in the control plane status. switch cluster.Status { @@ -211,6 +215,19 @@ func (s *Service) setStatus(cluster *ekstypes.Cluster) error { conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition, "updated", clusterv1.ConditionSeverityInfo, "") record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName()) } + if s.scope.ControlPlane.Spec.UpgradePolicy == ekscontrolplanev1.UpgradePolicyStandard && + (specSemver.Major < clusterSemver.Major || + (specSemver.Major == clusterSemver.Major && specSemver.Minor < clusterSemver.Minor)) { + s.scope.ControlPlane.Status.Ready = false + failureMsg := fmt.Sprintf( + "EKS control plane %s was automatically upgraded to version %s because %s is out of standard support. 
"+ + "This can be fixed by changing to the version of the AWSManagedControlPlane to the one reported in the status", + s.scope.KubernetesClusterName(), + clusterSemver.String(), + specSemver.String(), + ) + s.scope.ControlPlane.Status.FailureMessage = &failureMsg + } // TODO FailureReason case ekstypes.ClusterStatusCreating: s.scope.ControlPlane.Status.Ready = false @@ -478,6 +495,14 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek eksVersion = &v } + var upgradePolicy *ekstypes.UpgradePolicyRequest + + if s.scope.ControlPlane.Spec.UpgradePolicy != "" { + upgradePolicy = &ekstypes.UpgradePolicyRequest{ + SupportType: converters.SupportTypeToSDK(s.scope.ControlPlane.Spec.UpgradePolicy), + } + } + bootstrapAddon := s.scope.BootstrapSelfManagedAddons() input := &eks.CreateClusterInput{ Name: aws.String(eksClusterName), @@ -490,6 +515,7 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek Tags: tags, KubernetesNetworkConfig: netConfig, BootstrapSelfManagedAddons: bootstrapAddon, + UpgradePolicy: upgradePolicy, } var out *eks.CreateClusterOutput @@ -545,6 +571,11 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. input.ResourcesVpcConfig = updateVpcConfig } + if updateUpgradePolicy := s.reconcileUpgradePolicy(cluster.UpgradePolicy); updateUpgradePolicy != nil { + needsUpdate = true + input.UpgradePolicy = updateUpgradePolicy + } + if needsUpdate { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { @@ -782,6 +813,26 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes return nil } +func (s *Service) reconcileUpgradePolicy(upgradePolicy *ekstypes.UpgradePolicyResponse) *ekstypes.UpgradePolicyRequest { + // Should not update when cluster upgrade policy is unknown + if upgradePolicy == nil { + return nil + } + + // Cluster stay unchanged when upgrade policy omitted + if s.scope.ControlPlane.Spec.UpgradePolicy == "" { + return nil + } + + if strings.ToLower(string(upgradePolicy.SupportType)) == s.scope.ControlPlane.Spec.UpgradePolicy.String() { + return nil + } + + return &ekstypes.UpgradePolicyRequest{ + SupportType: converters.SupportTypeToSDK(s.scope.ControlPlane.Spec.UpgradePolicy), + } +} + func (s *Service) describeEKSCluster(ctx context.Context, eksClusterName string) (*ekstypes.Cluster, error) { input := &eks.DescribeClusterInput{ Name: aws.String(eksClusterName), diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index b120226697..25d69aaaf8 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -652,6 +652,7 @@ func TestCreateCluster(t *testing.T) { RoleName: tc.role, NetworkSpec: infrav1.NetworkSpec{Subnets: tc.subnets}, BootstrapSelfManagedAddons: false, + UpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, }, }, }) @@ -674,6 +675,9 @@ func TestCreateCluster(t *testing.T) { Tags: tc.tags, Version: version, BootstrapSelfManagedAddons: aws.Bool(false), + UpgradePolicy: &ekstypes.UpgradePolicyRequest{ + SupportType: ekstypes.SupportTypeStandard, + }, }).Return(&eks.CreateClusterOutput{}, nil) } s := NewService(scope) @@ -805,6 +809,91 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) { } } +func TestReconcileUpgradePolicy(t *testing.T) { + clusterName := "default.cluster" + tests := []struct { + name string + oldUpgradePolicy *ekstypes.UpgradePolicyResponse + 
newUpgradePolicy ekscontrolplanev1.UpgradePolicy + expect *ekstypes.UpgradePolicyRequest + expectError bool + }{ + { + name: "no update necessary - upgrade policy omitted", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + expect: nil, + expectError: false, + }, + { + name: "no update necessary - cannot get cluster upgrade policy", + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, + expect: nil, + expectError: false, + }, + { + name: "no update necessary - upgrade policy unchanged", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, + expect: nil, + expectError: false, + }, + { + name: "needs update", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyExtended, + expect: &ekstypes.UpgradePolicyRequest{ + SupportType: ekstypes.SupportTypeExtended, + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + Version: aws.String("1.16"), + UpgradePolicy: tc.newUpgradePolicy, + }, + }, + }) + g.Expect(err).To(BeNil()) + + s := NewService(scope) + + upgradePolicyRequest := s.reconcileUpgradePolicy(tc.oldUpgradePolicy) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(upgradePolicyRequest).To(Equal(tc.expect)) + }) + } +} + func TestCreateIPv6Cluster(t *testing.T) { g := NewWithT(t) diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index b8230eace7..8271d1b529 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -112,6 +112,8 @@ providers: targetName: "cluster-template-eks-managedmachinepool.yaml" - sourcePath: "./eks/cluster-template-eks-ipv6-cluster.yaml" targetName: "cluster-template-eks-ipv6-cluster.yaml" + - sourcePath: "./eks/cluster-template-eks-upgrade-policy.yaml" + targetName: "cluster-template-eks-upgrade-policy.yaml" - sourcePath: "./eks/cluster-template-eks-control-plane-only-legacy.yaml" targetName: "cluster-template-eks-control-plane-only-legacy.yaml" - sourcePath: "./eks/cluster-template-eks-control-plane-bare-eks.yaml" diff --git a/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml b/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml new file mode 100644 index 0000000000..16db15c010 --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + 
name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + version: "${KUBERNETES_VERSION}" + upgradePolicy: "${UPGRADE_POLICY}" + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index 4b7adf22c6..48d8417f57 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -69,6 +69,7 @@ const ( MultiTenancy = "MULTI_TENANCY_" EksUpgradeFromVersion = "UPGRADE_FROM_VERSION" EksUpgradeToVersion = "UPGRADE_TO_VERSION" + UpgradePolicy = "UPGRADE_POLICY" ClassicElbTestKubernetesFrom = "CLASSICELB_TEST_KUBERNETES_VERSION_FROM" ClassicElbTestKubernetesTo = "CLASSICELB_TEST_KUBERNETES_VERSION_TO" diff --git a/test/e2e/suites/managed/eks_upgrade_policy_test.go b/test/e2e/suites/managed/eks_upgrade_policy_test.go new file mode 100644 index 0000000000..16208f4b90 --- /dev/null +++ b/test/e2e/suites/managed/eks_upgrade_policy_test.go @@ -0,0 +1,137 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managed + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +// EKS upgrade policy test. +var _ = ginkgo.Describe("EKS upgrade policy test", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + specName = "cluster" + clusterName string + ) + + ginkgo.It("[managed] [upgrade-policy] Able to update cluster upgrade policy from STANDARD to EXTENDED", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. 
e2eConfig can't be nil when calling %s spec", specName) + + upgradePolicy := ekscontrolplanev1.UpgradePolicyStandard + shared.SetEnvVar(shared.UpgradePolicy, upgradePolicy.String(), false) + + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + eksClusterName := getEKSClusterName(namespace.Name, clusterName) + + ginkgo.By("default iam role should exist") + VerifyRoleExistsAndOwned(ctx, ekscontrolplanev1.DefaultEKSControlPlaneRole, eksClusterName, false, e2eCtx.AWSSession) + + getManagedClusterSpec := func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSUpgradePolicyFlavor, + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error + WorkerMachineCount: 0, + } + } + + ginkgo.By("should create an EKS control plane") + ManagedClusterSpec(ctx, getManagedClusterSpec) + + ginkgo.By(fmt.Sprintf("getting cluster with name %s", clusterName)) + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find cluster") + + WaitForEKSClusterUpgradePolicy(ctx, e2eCtx.BootstrapUserAWSSession, eksClusterName, upgradePolicy) + + changedUpgradePolicy := ekscontrolplanev1.UpgradePolicyExtended + ginkgo.By(fmt.Sprintf("Changing the UpgradePolicy from %s to %s", upgradePolicy, changedUpgradePolicy)) + shared.SetEnvVar(shared.UpgradePolicy, changedUpgradePolicy.String(), false) + ManagedClusterSpec(ctx, getManagedClusterSpec) + WaitForEKSClusterUpgradePolicy(ctx, e2eCtx.BootstrapUserAWSSession, eksClusterName, changedUpgradePolicy) + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) + }) +}) + +func WaitForEKSClusterUpgradePolicy(ctx context.Context, sess *aws.Config, eksClusterName string, upgradePolicy ekscontrolplanev1.UpgradePolicy) { + ginkgo.By(fmt.Sprintf("Checking EKS control plane upgrade policy matches %s", upgradePolicy)) + Eventually(func() error { + cluster, err := getEKSCluster(ctx, eksClusterName, sess) + if err != nil { + smithyErr := awserrors.ParseSmithyError(err) + notFoundErr := &ekstypes.ResourceNotFoundException{} + if smithyErr.ErrorCode() == notFoundErr.ErrorCode() { + // Unrecoverable error; stop trying and fail early. + return StopTrying(fmt.Sprintf("unrecoverable error: cluster %q not found: %s", eksClusterName, smithyErr.ErrorMessage())) + } + return err // For transient errors, retry + } + + expectedPolicy := converters.SupportTypeToSDK(upgradePolicy) + actualPolicy := cluster.UpgradePolicy.SupportType + + if actualPolicy != expectedPolicy { + // The upgrade policy change hasn't been reflected in EKS yet; error and try again. 
+ return fmt.Errorf("upgrade policy mismatch: expected %s, but found %s", expectedPolicy, actualPolicy) + } + + // Success: the change has been reflected in EKS. + return nil + }, 5*time.Minute, 10*time.Second).Should(Succeed(), fmt.Sprintf("eventually failed checking EKS cluster %q upgrade policy is %s", eksClusterName, upgradePolicy)) +} diff --git a/test/e2e/suites/managed/helpers.go b/test/e2e/suites/managed/helpers.go index 926d914248..dce878d89a 100644 --- a/test/e2e/suites/managed/helpers.go +++ b/test/e2e/suites/managed/helpers.go @@ -48,6 +48,7 @@ const ( EKSManagedMachinePoolWithLaunchTemplateOnlyFlavor = "eks-managed-machinepool-with-launch-template-only" EKSMachinePoolOnlyFlavor = "eks-machinepool-only" EKSIPv6ClusterFlavor = "eks-ipv6-cluster" + EKSUpgradePolicyFlavor = "eks-upgrade-policy" EKSControlPlaneOnlyLegacyFlavor = "eks-control-plane-only-legacy" EKSClusterClassFlavor = "eks-clusterclass" EKSAuthAPIAndConfigMapFlavor = "eks-auth-api-and-config-map" @@ -105,6 +106,10 @@ func getEKSCluster(ctx context.Context, eksClusterName string, sess *aws.Config) } result, err := eksClient.DescribeCluster(ctx, input) + if err != nil { + return nil, err + } + return result.Cluster, err }
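Taken together, the change is exercised end to end: the CRDs accept the field, `createCluster` sends it on create, `reconcileUpgradePolicy` converges it on update, and the e2e test flips it from standard to extended. From a user's perspective, a manifest exercising the new field might look like the following, a sketch based on the CRD schema and the e2e template above (the name, region, and version are placeholders):

```yaml
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: AWSManagedControlPlane
metadata:
  name: my-cluster-control-plane # placeholder name
spec:
  region: us-west-2 # placeholder region
  version: "v1.31" # only MAJOR.MINOR is taken into consideration
  upgradePolicy: standard # one of standard, extended
```

Omitting `upgradePolicy` keeps the AWS default (currently `extended`) for new clusters and leaves the policy of existing clusters unchanged, per the API documentation added in this change.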