From c38276b82e7cb06128e55871468c5dc2b9718c33 Mon Sep 17 00:00:00 2001 From: faiq Date: Tue, 23 Sep 2025 09:21:55 -0700 Subject: [PATCH 01/18] ci: adds unit tests --- .github/workflows/unit-test.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/unit-test.yml diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml new file mode 100644 index 0000000000..916b52cb96 --- /dev/null +++ b/.github/workflows/unit-test.yml @@ -0,0 +1,26 @@ +name: PR run unit and integration tests + +on: + pull_request_target: + types: + - edited + - labeled + - reopened + - synchronize + +jobs: + approve: + name: runs unit and integration tests + if: contains(github.event.pull_request.labels.*.name, 'ok-to-test') + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493 # tag=v4.2.2 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # tag=v6.0.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Run runs-test + run: make test From 97049cec6af8dfa508e4fbbfbf22861a1cef8541 Mon Sep 17 00:00:00 2001 From: faiq Date: Wed, 10 Sep 2025 14:52:18 -0700 Subject: [PATCH 02/18] feat: starts bootstrapping new type with kubebuilder --- bootstrap/eks/PROJECT | 2 +- .../eks/api/v1beta2/nodeadmconfig_types.go | 48 +++++ .../api/v1beta2/nodeadmconfigtemplate_type.go | 56 ++++++ .../eks/api/v1beta2/zz_generated.deepcopy.go | 179 ++++++++++++++++++ .../controllers/nodeadmconfig_controller.go | 47 +++++ .../nodeadmconfig_controller_test.go | 68 +++++++ ...strap.cluster.x-k8s.io_nodeadmconfigs.yaml | 54 ++++++ ...uster.x-k8s.io_nodeadmconfigtemplates.yaml | 65 +++++++ config/crd/kustomization.yaml | 1 + config/rbac/role.yaml | 19 ++ main.go | 8 + 11 files changed, 546 insertions(+), 1 deletion(-) create mode 100644 
bootstrap/eks/api/v1beta2/nodeadmconfig_types.go create mode 100644 bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go create mode 100644 bootstrap/eks/controllers/nodeadmconfig_controller.go create mode 100644 bootstrap/eks/controllers/nodeadmconfig_controller_test.go create mode 100644 config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml create mode 100644 config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml diff --git a/bootstrap/eks/PROJECT b/bootstrap/eks/PROJECT index aad25560b3..c72ac79c10 100644 --- a/bootstrap/eks/PROJECT +++ b/bootstrap/eks/PROJECT @@ -15,4 +15,4 @@ resources: - group: bootstrap kind: EKSConfigTemplate version: v1beta2 -version: "2" +version: "3" diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go new file mode 100644 index 0000000000..3415b12e36 --- /dev/null +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -0,0 +1,48 @@ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NodeadmConfigSpec defines the desired state of NodeadmConfig. +type NodeadmConfigSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Foo is an example field of NodeadmConfig. Edit nodeadmconfig_types.go to remove/update + Foo string `json:"foo,omitempty"` +} + +// NodeadmConfigStatus defines the observed state of NodeadmConfig. +type NodeadmConfigStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NodeadmConfig is the Schema for the nodeadmconfigs API. 
+type NodeadmConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NodeadmConfigSpec `json:"spec,omitempty"` + Status NodeadmConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NodeadmConfigList contains a list of NodeadmConfig. +type NodeadmConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeadmConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NodeadmConfig{}, &NodeadmConfigList{}) +} diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go b/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go new file mode 100644 index 0000000000..da38328ad9 --- /dev/null +++ b/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NodeadmConfigTemplateSpec defines the desired state of templated NodeadmConfig Amazon EKS Configuration resources. +type NodeadmConfigTemplateSpec struct { + Template NodeadmConfigTemplateResource `json:"template"` +} + +// NodeadmConfigTemplateResource defines the Template structure. 
+type NodeadmConfigTemplateResource struct { + Spec NodeadmConfigSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=nodeadmconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=eksct +// +kubebuilder:storageversion + +// NodeadmConfigTemplate is the Amazon EKS Bootstrap Configuration Template API. +type NodeadmConfigTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NodeadmConfigTemplateSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// NodeadmConfigTemplateList contains a list of Amazon EKS Bootstrap Configuration Templates. +type NodeadmConfigTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeadmConfigTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NodeadmConfigTemplate{}, &NodeadmConfigTemplateList{}) +} diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index 7b059799a7..ee6746cb0d 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -447,6 +447,185 @@ func (in *NTP) DeepCopy() *NTP { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfig) DeepCopyInto(out *NodeadmConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfig. +func (in *NodeadmConfig) DeepCopy() *NodeadmConfig { + if in == nil { + return nil + } + out := new(NodeadmConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NodeadmConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigList) DeepCopyInto(out *NodeadmConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeadmConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigList. +func (in *NodeadmConfigList) DeepCopy() *NodeadmConfigList { + if in == nil { + return nil + } + out := new(NodeadmConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeadmConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigSpec) DeepCopyInto(out *NodeadmConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigSpec. +func (in *NodeadmConfigSpec) DeepCopy() *NodeadmConfigSpec { + if in == nil { + return nil + } + out := new(NodeadmConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigStatus) DeepCopyInto(out *NodeadmConfigStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigStatus. 
+func (in *NodeadmConfigStatus) DeepCopy() *NodeadmConfigStatus { + if in == nil { + return nil + } + out := new(NodeadmConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigTemplate) DeepCopyInto(out *NodeadmConfigTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplate. +func (in *NodeadmConfigTemplate) DeepCopy() *NodeadmConfigTemplate { + if in == nil { + return nil + } + out := new(NodeadmConfigTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeadmConfigTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigTemplateList) DeepCopyInto(out *NodeadmConfigTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeadmConfigTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplateList. +func (in *NodeadmConfigTemplateList) DeepCopy() *NodeadmConfigTemplateList { + if in == nil { + return nil + } + out := new(NodeadmConfigTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NodeadmConfigTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigTemplateResource) DeepCopyInto(out *NodeadmConfigTemplateResource) { + *out = *in + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplateResource. +func (in *NodeadmConfigTemplateResource) DeepCopy() *NodeadmConfigTemplateResource { + if in == nil { + return nil + } + out := new(NodeadmConfigTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeadmConfigTemplateSpec) DeepCopyInto(out *NodeadmConfigTemplateSpec) { + *out = *in + out.Template = in.Template +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplateSpec. +func (in *NodeadmConfigTemplateSpec) DeepCopy() *NodeadmConfigTemplateSpec { + if in == nil { + return nil + } + out := new(NodeadmConfigTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Partition) DeepCopyInto(out *Partition) { *out = *in diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go new file mode 100644 index 0000000000..45b416997b --- /dev/null +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -0,0 +1,47 @@ +package controllers + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" +) + +// NodeadmConfigReconciler reconciles a NodeadmConfig object +type NodeadmConfigReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the NodeadmConfig object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile +func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = logf.FromContext(ctx) + + // TODO(user): your logic here + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *NodeadmConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&eksbootstrapv1.NodeadmConfig{}).
+		Named("nodeadmconfig").
+		Complete(r)
+}
diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller_test.go b/bootstrap/eks/controllers/nodeadmconfig_controller_test.go
new file mode 100644
index 0000000000..dae4dfdaa3
--- /dev/null
+++ b/bootstrap/eks/controllers/nodeadmconfig_controller_test.go
@@ -0,0 +1,68 @@
+package controllers
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	bootstrapv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
+)
+
+var _ = Describe("NodeadmConfig Controller", func() {
+	Context("When reconciling a resource", func() {
+		const resourceName = "test-resource"
+
+		ctx := context.Background()
+
+		typeNamespacedName := types.NamespacedName{
+			Name:      resourceName,
+			Namespace: "default", // TODO(user):Modify as needed
+		}
+		nodeadmconfig := &bootstrapv1beta2.NodeadmConfig{}
+
+		BeforeEach(func() {
+			By("creating the custom resource for the Kind NodeadmConfig")
+			err := k8sClient.Get(ctx, typeNamespacedName, nodeadmconfig)
+			if err != nil && errors.IsNotFound(err) {
+				resource := &bootstrapv1beta2.NodeadmConfig{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      resourceName,
+						Namespace: "default",
+					},
+					// TODO(user): Specify other spec details if needed.
+				}
+				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+			}
+		})
+
+		AfterEach(func() {
+			// TODO(user): Cleanup logic after each test, like removing the resource instance.
+ resource := &bootstrapv1beta2.NodeadmConfig{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance NodeadmConfig") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &NodeadmConfigReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml new file mode 100644 index 0000000000..4d58f9ddcf --- /dev/null +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: nodeadmconfigs.bootstrap.cluster.x-k8s.io +spec: + group: bootstrap.cluster.x-k8s.io + names: + kind: NodeadmConfig + listKind: NodeadmConfigList + plural: nodeadmconfigs + singular: nodeadmconfig + scope: Namespaced + versions: + - name: v1beta2 + schema: + openAPIV3Schema: + description: NodeadmConfig is the Schema for the nodeadmconfigs API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeadmConfigSpec defines the desired state of NodeadmConfig. + properties: + foo: + description: Foo is an example field of NodeadmConfig. Edit nodeadmconfig_types.go + to remove/update + type: string + type: object + status: + description: NodeadmConfigStatus defines the observed state of NodeadmConfig. + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml new file mode 100644 index 0000000000..de2254d544 --- /dev/null +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: nodeadmconfigtemplates.bootstrap.cluster.x-k8s.io +spec: + group: bootstrap.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: NodeadmConfigTemplate + listKind: NodeadmConfigTemplateList + plural: nodeadmconfigtemplates + shortNames: + - eksct + singular: nodeadmconfigtemplate + scope: Namespaced + versions: + - name: v1beta2 + schema: + openAPIV3Schema: + description: NodeadmConfigTemplate is the Amazon EKS Bootstrap Configuration + Template API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeadmConfigTemplateSpec defines the desired state of templated + NodeadmConfig Amazon EKS Configuration resources. + properties: + template: + description: NodeadmConfigTemplateResource defines the Template structure. + properties: + spec: + description: NodeadmConfigSpec defines the desired state of NodeadmConfig. + properties: + foo: + description: Foo is an example field of NodeadmConfig. 
Edit
+                          nodeadmconfig_types.go to remove/update
+                        type: string
+                    type: object
+                type: object
+            required:
+            - template
+            type: object
+        type: object
+    served: true
+    storage: true
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index c3f6177556..b0bf490a5d 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -22,6 +22,7 @@ resources:
 - bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml
 - bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
+- bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml
 - bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
 - bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml
 - bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index a2ed671ffb..c45d7e4029 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -62,10 +62,29 @@ rules:
   - bootstrap.cluster.x-k8s.io
   resources:
   - eksconfigs/status
+  - nodeadmconfigs/status
   verbs:
   - get
   - patch
   - update
+- apiGroups:
+  - bootstrap.cluster.x-k8s.io
+  resources:
+  - nodeadmconfigs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - bootstrap.cluster.x-k8s.io
+  resources:
+  - nodeadmconfigs/finalizers
+  verbs:
+  - update
 - apiGroups:
   - cluster.x-k8s.io
   resources:
diff --git a/main.go b/main.go
index 8aac35b373..3071494614 100644
--- a/main.go
+++ b/main.go
@@ -451,6 +451,14 @@ func setupEKSReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager,
 		os.Exit(1)
 	}
 
+	if err := (&eksbootstrapcontrollers.NodeadmConfigReconciler{
+		Client:           mgr.GetClient(),
+		WatchFilterValue: watchFilterValue,
+	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "NodeadmConfig")
+		os.Exit(1)
+	}
+
setupLog.Debug("enabling EKS managed cluster controller") if err := (&controllers.AWSManagedClusterReconciler{ Client: mgr.GetClient(), From 7e42d77f8850aebe7546afcbdc36c9a1e6a711d5 Mon Sep 17 00:00:00 2001 From: faiq Date: Wed, 10 Sep 2025 16:08:27 -0700 Subject: [PATCH 03/18] feat: adds fields to nodeadm config --- bootstrap/eks/api/v1beta2/eksconfig_types.go | 197 --------- .../eks/api/v1beta2/nodeadmconfig_types.go | 154 ++++++- bootstrap/eks/api/v1beta2/types.go | 198 +++++++++ .../eks/api/v1beta2/zz_generated.deepcopy.go | 169 +++++++- ...strap.cluster.x-k8s.io_nodeadmconfigs.yaml | 380 +++++++++++++++++- ...uster.x-k8s.io_nodeadmconfigtemplates.yaml | 318 ++++++++++++++- 6 files changed, 1200 insertions(+), 216 deletions(-) create mode 100644 bootstrap/eks/api/v1beta2/types.go diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go index a2fce8e2cb..6cb7b31143 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go @@ -110,203 +110,6 @@ type EKSConfigStatus struct { Conditions clusterv1.Conditions `json:"conditions,omitempty"` } -// Encoding specifies the cloud-init file encoding. -// +kubebuilder:validation:Enum=base64;gzip;gzip+base64 -type Encoding string - -const ( - // Base64 implies the contents of the file are encoded as base64. - Base64 Encoding = "base64" - // Gzip implies the contents of the file are encoded with gzip. - Gzip Encoding = "gzip" - // GzipBase64 implies the contents of the file are first base64 encoded and then gzip encoded. - GzipBase64 Encoding = "gzip+base64" -) - -// File defines the input for generating write_files in cloud-init. -type File struct { - // Path specifies the full path on disk where to store the file. - Path string `json:"path"` - - // Owner specifies the ownership of the file, e.g. "root:root". 
- // +optional - Owner string `json:"owner,omitempty"` - - // Permissions specifies the permissions to assign to the file, e.g. "0640". - // +optional - Permissions string `json:"permissions,omitempty"` - - // Encoding specifies the encoding of the file contents. - // +optional - Encoding Encoding `json:"encoding,omitempty"` - - // Append specifies whether to append Content to existing file if Path exists. - // +optional - Append bool `json:"append,omitempty"` - - // Content is the actual content of the file. - // +optional - Content string `json:"content,omitempty"` - - // ContentFrom is a referenced source of content to populate the file. - // +optional - ContentFrom *FileSource `json:"contentFrom,omitempty"` -} - -// FileSource is a union of all possible external source types for file data. -// Only one field may be populated in any given instance. Developers adding new -// sources of data for target systems should add them here. -type FileSource struct { - // Secret represents a secret that should populate this file. - Secret SecretFileSource `json:"secret"` -} - -// SecretFileSource adapts a Secret into a FileSource. -// -// The contents of the target Secret's Data field will be presented -// as files using the keys in the Data field as the file names. -type SecretFileSource struct { - // Name of the secret in the KubeadmBootstrapConfig's namespace to use. - Name string `json:"name"` - - // Key is the key in the secret's data map for this value. - Key string `json:"key"` -} - -// PasswdSource is a union of all possible external source types for passwd data. -// Only one field may be populated in any given instance. Developers adding new -// sources of data for target systems should add them here. -type PasswdSource struct { - // Secret represents a secret that should populate this password. - Secret SecretPasswdSource `json:"secret"` -} - -// SecretPasswdSource adapts a Secret into a PasswdSource. 
-// -// The contents of the target Secret's Data field will be presented -// as passwd using the keys in the Data field as the file names. -type SecretPasswdSource struct { - // Name of the secret in the KubeadmBootstrapConfig's namespace to use. - Name string `json:"name"` - - // Key is the key in the secret's data map for this value. - Key string `json:"key"` -} - -// User defines the input for a generated user in cloud-init. -type User struct { - // Name specifies the username - Name string `json:"name"` - - // Gecos specifies the gecos to use for the user - // +optional - Gecos *string `json:"gecos,omitempty"` - - // Groups specifies the additional groups for the user - // +optional - Groups *string `json:"groups,omitempty"` - - // HomeDir specifies the home directory to use for the user - // +optional - HomeDir *string `json:"homeDir,omitempty"` - - // Inactive specifies whether to mark the user as inactive - // +optional - Inactive *bool `json:"inactive,omitempty"` - - // Shell specifies the user's shell - // +optional - Shell *string `json:"shell,omitempty"` - - // Passwd specifies a hashed password for the user - // +optional - Passwd *string `json:"passwd,omitempty"` - - // PasswdFrom is a referenced source of passwd to populate the passwd. - // +optional - PasswdFrom *PasswdSource `json:"passwdFrom,omitempty"` - - // PrimaryGroup specifies the primary group for the user - // +optional - PrimaryGroup *string `json:"primaryGroup,omitempty"` - - // LockPassword specifies if password login should be disabled - // +optional - LockPassword *bool `json:"lockPassword,omitempty"` - - // Sudo specifies a sudo role for the user - // +optional - Sudo *string `json:"sudo,omitempty"` - - // SSHAuthorizedKeys specifies a list of ssh authorized keys for the user - // +optional - SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"` -} - -// NTP defines input for generated ntp in cloud-init. 
-type NTP struct { - // Servers specifies which NTP servers to use - // +optional - Servers []string `json:"servers,omitempty"` - - // Enabled specifies whether NTP should be enabled - // +optional - Enabled *bool `json:"enabled,omitempty"` -} - -// DiskSetup defines input for generated disk_setup and fs_setup in cloud-init. -type DiskSetup struct { - // Partitions specifies the list of the partitions to setup. - // +optional - Partitions []Partition `json:"partitions,omitempty"` - - // Filesystems specifies the list of file systems to setup. - // +optional - Filesystems []Filesystem `json:"filesystems,omitempty"` -} - -// Partition defines how to create and layout a partition. -type Partition struct { - // Device is the name of the device. - Device string `json:"device"` - // Layout specifies the device layout. - // If it is true, a single partition will be created for the entire device. - // When layout is false, it means don't partition or ignore existing partitioning. - Layout bool `json:"layout"` - // Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. - // Use with caution. Default is 'false'. - // +optional - Overwrite *bool `json:"overwrite,omitempty"` - // TableType specifies the tupe of partition table. The following are supported: - // 'mbr': default and setups a MS-DOS partition table - // 'gpt': setups a GPT partition table - // +optional - TableType *string `json:"tableType,omitempty"` -} - -// Filesystem defines the file systems to be created. -type Filesystem struct { - // Device specifies the device name - Device string `json:"device"` - // Filesystem specifies the file system type. - Filesystem string `json:"filesystem"` - // Label specifies the file system label to be used. If set to None, no label is used. - Label string `json:"label"` - // Partition specifies the partition to use. 
The valid options are: "auto|any", "auto", "any", "none", and , where NUM is the actual partition number. - // +optional - Partition *string `json:"partition,omitempty"` - // Overwrite defines whether or not to overwrite any existing filesystem. - // If true, any pre-existing file system will be destroyed. Use with Caution. - // +optional - Overwrite *bool `json:"overwrite,omitempty"` - // ExtraOpts defined extra options to add to the command for creating the file system. - // +optional - ExtraOpts []string `json:"extraOpts,omitempty"` -} - -// MountPoints defines input for generated mounts in cloud-init. -type MountPoints []string - // +kubebuilder:object:root=true // +kubebuilder:resource:path=eksconfigs,scope=Namespaced,categories=cluster-api,shortName=eksc // +kubebuilder:storageversion diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go index 3415b12e36..29994c3977 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -2,6 +2,8 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -9,17 +11,157 @@ import ( // NodeadmConfigSpec defines the desired state of NodeadmConfig. type NodeadmConfigSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Kubelet contains options for kubelet. + // +optional + Kubelet *KubeletOptions `json:"kubelet,omitempty"` - // Foo is an example field of NodeadmConfig. Edit nodeadmconfig_types.go to remove/update - Foo string `json:"foo,omitempty"` + // Containerd contains options for containerd. + // +optional + Containerd *ContainerdOptions `json:"containerd,omitempty"` + + // Instance contains options for the node's operating system and devices. 
+ // +optional + Instance *InstanceOptions `json:"instance,omitempty"` + + // FeatureGates holds key-value pairs to enable or disable application features. + // +optional + FeatureGates map[Feature]bool `json:"featureGates,omitempty"` + + // PreBootstrapCommands specifies extra commands to run before bootstrapping nodes. + // +optional + PreBootstrapCommands []string `json:"preBootstrapCommands,omitempty"` + + // Files specifies extra files to be passed to user_data upon creation. + // +optional + Files []File `json:"files,omitempty"` + + // Users specifies extra users to add. + // +optional + Users []User `json:"users,omitempty"` + + // NTP specifies NTP configuration. + // +optional + NTP *NTP `json:"ntp,omitempty"` + + // DiskSetup specifies options for the creation of partition tables and file systems on devices. + // +optional + DiskSetup *DiskSetup `json:"diskSetup,omitempty"` + + // Mounts specifies a list of mount points to be setup. + // +optional + Mounts []MountPoints `json:"mounts,omitempty"` +} + +// KubeletOptions are additional parameters passed to kubelet. +type KubeletOptions struct { + // Config is a KubeletConfiguration that will be merged with the defaults. + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Config *runtime.RawExtension `json:"config,omitempty"` + + // Flags are command-line kubelet arguments that will be appended to the defaults. + // +optional + Flags []string `json:"flags,omitempty"` +} + +// ContainerdOptions are additional parameters passed to containerd. +type ContainerdOptions struct { + // Config is an inline containerd configuration TOML that will be merged with the defaults. + // +optional + Config string `json:"config,omitempty"` + + // BaseRuntimeSpec is the OCI runtime specification upon which all containers will be based. 
+ // +optional + // +kubebuilder:pruning:PreserveUnknownFields + BaseRuntimeSpec *runtime.RawExtension `json:"baseRuntimeSpec,omitempty"` +} + +// InstanceOptions determines how the node's operating system and devices are configured. +type InstanceOptions struct { + // LocalStorage contains options for configuring EC2 instance stores. + // +optional + LocalStorage *LocalStorageOptions `json:"localStorage,omitempty"` +} + +// LocalStorageOptions control how EC2 instance stores are used when available. +type LocalStorageOptions struct { + // Strategy specifies how to handle an instance's local storage devices. + Strategy LocalStorageStrategy `json:"strategy"` + + // MountPath is the path where the filesystem will be mounted. + // Defaults to "/mnt/k8s-disks/". + // +optional + MountPath string `json:"mountPath,omitempty"` + + // DisabledMounts is a list of directories that will not be mounted to LocalStorage. + // By default, all mounts are enabled. + // +optional + DisabledMounts []DisabledMount `json:"disabledMounts,omitempty"` +} + +// Feature specifies which feature gate should be toggled. +// +kubebuilder:validation:Enum=InstanceIdNodeName;FastImagePull +type Feature string + +const ( + FeatureInstanceIdNodeName Feature = "InstanceIdNodeName" + FeatureFastImagePull Feature = "FastImagePull" +) + +// LocalStorageStrategy specifies how to handle an instance's local storage devices. +// +kubebuilder:validation:Enum=RAID0;RAID10;Mount +type LocalStorageStrategy string + +const ( + RAID0Strategy LocalStorageStrategy = "RAID0" + RAID10Strategy LocalStorageStrategy = "RAID10" + MountStrategy LocalStorageStrategy = "Mount" +) + +// DisabledMount specifies a directory that should not be mounted onto local storage. 
+// +kubebuilder:validation:Enum=Containerd;PodLogs +type DisabledMount string + +const ( + DisabledMountContainerd DisabledMount = "Containerd" + DisabledMountPodLogs DisabledMount = "PodLogs" +) + +// GetConditions returns the observations of the operational state of the NodeadmConfig resource. +func (r *NodeadmConfig) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the NodeadmConfig to the predescribed clusterv1.Conditions. +func (r *NodeadmConfig) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions } // NodeadmConfigStatus defines the observed state of NodeadmConfig. type NodeadmConfigStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Ready indicates the BootstrapData secret is ready to be consumed. + // +optional + Ready bool `json:"ready,omitempty"` + + // DataSecretName is the name of the secret that stores the bootstrap data script. + // +optional + DataSecretName *string `json:"dataSecretName,omitempty"` + + // FailureReason will be set on non-retryable errors. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // FailureMessage will be set on non-retryable errors. + // +optional + FailureMessage string `json:"failureMessage,omitempty"` + + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions defines current service state of the NodeadmConfig. 
+ // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true diff --git a/bootstrap/eks/api/v1beta2/types.go b/bootstrap/eks/api/v1beta2/types.go new file mode 100644 index 0000000000..5f8589c0ee --- /dev/null +++ b/bootstrap/eks/api/v1beta2/types.go @@ -0,0 +1,198 @@ +package v1beta2 + +// Encoding specifies the cloud-init file encoding. +// +kubebuilder:validation:Enum=base64;gzip;gzip+base64 +type Encoding string + +const ( + // Base64 implies the contents of the file are encoded as base64. + Base64 Encoding = "base64" + // Gzip implies the contents of the file are encoded with gzip. + Gzip Encoding = "gzip" + // GzipBase64 implies the contents of the file are first base64 encoded and then gzip encoded. + GzipBase64 Encoding = "gzip+base64" +) + +// File defines the input for generating write_files in cloud-init. +type File struct { + // Path specifies the full path on disk where to store the file. + Path string `json:"path"` + + // Owner specifies the ownership of the file, e.g. "root:root". + // +optional + Owner string `json:"owner,omitempty"` + + // Permissions specifies the permissions to assign to the file, e.g. "0640". + // +optional + Permissions string `json:"permissions,omitempty"` + + // Encoding specifies the encoding of the file contents. + // +optional + Encoding Encoding `json:"encoding,omitempty"` + + // Append specifies whether to append Content to existing file if Path exists. + // +optional + Append bool `json:"append,omitempty"` + + // Content is the actual content of the file. + // +optional + Content string `json:"content,omitempty"` + + // ContentFrom is a referenced source of content to populate the file. + // +optional + ContentFrom *FileSource `json:"contentFrom,omitempty"` +} + +// FileSource is a union of all possible external source types for file data. +// Only one field may be populated in any given instance. 
Developers adding new +// sources of data for target systems should add them here. +type FileSource struct { + // Secret represents a secret that should populate this file. + Secret SecretFileSource `json:"secret"` +} + +// SecretFileSource adapts a Secret into a FileSource. +// +// The contents of the target Secret's Data field will be presented +// as files using the keys in the Data field as the file names. +type SecretFileSource struct { + // Name of the secret in the KubeadmBootstrapConfig's namespace to use. + Name string `json:"name"` + + // Key is the key in the secret's data map for this value. + Key string `json:"key"` +} + +// PasswdSource is a union of all possible external source types for passwd data. +// Only one field may be populated in any given instance. Developers adding new +// sources of data for target systems should add them here. +type PasswdSource struct { + // Secret represents a secret that should populate this password. + Secret SecretPasswdSource `json:"secret"` +} + +// SecretPasswdSource adapts a Secret into a PasswdSource. +// +// The contents of the target Secret's Data field will be presented +// as passwd using the keys in the Data field as the file names. +type SecretPasswdSource struct { + // Name of the secret in the KubeadmBootstrapConfig's namespace to use. + Name string `json:"name"` + + // Key is the key in the secret's data map for this value. + Key string `json:"key"` +} + +// User defines the input for a generated user in cloud-init. 
+type User struct { + // Name specifies the username + Name string `json:"name"` + + // Gecos specifies the gecos to use for the user + // +optional + Gecos *string `json:"gecos,omitempty"` + + // Groups specifies the additional groups for the user + // +optional + Groups *string `json:"groups,omitempty"` + + // HomeDir specifies the home directory to use for the user + // +optional + HomeDir *string `json:"homeDir,omitempty"` + + // Inactive specifies whether to mark the user as inactive + // +optional + Inactive *bool `json:"inactive,omitempty"` + + // Shell specifies the user's shell + // +optional + Shell *string `json:"shell,omitempty"` + + // Passwd specifies a hashed password for the user + // +optional + Passwd *string `json:"passwd,omitempty"` + + // PasswdFrom is a referenced source of passwd to populate the passwd. + // +optional + PasswdFrom *PasswdSource `json:"passwdFrom,omitempty"` + + // PrimaryGroup specifies the primary group for the user + // +optional + PrimaryGroup *string `json:"primaryGroup,omitempty"` + + // LockPassword specifies if password login should be disabled + // +optional + LockPassword *bool `json:"lockPassword,omitempty"` + + // Sudo specifies a sudo role for the user + // +optional + Sudo *string `json:"sudo,omitempty"` + + // SSHAuthorizedKeys specifies a list of ssh authorized keys for the user + // +optional + SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"` +} + +// NTP defines input for generated ntp in cloud-init. +type NTP struct { + // Servers specifies which NTP servers to use + // +optional + Servers []string `json:"servers,omitempty"` + + // Enabled specifies whether NTP should be enabled + // +optional + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskSetup defines input for generated disk_setup and fs_setup in cloud-init. +type DiskSetup struct { + // Partitions specifies the list of the partitions to setup. 
+ // +optional + Partitions []Partition `json:"partitions,omitempty"` + + // Filesystems specifies the list of file systems to setup. + // +optional + Filesystems []Filesystem `json:"filesystems,omitempty"` +} + +// Partition defines how to create and layout a partition. +type Partition struct { + // Device is the name of the device. + Device string `json:"device"` + // Layout specifies the device layout. + // If it is true, a single partition will be created for the entire device. + // When layout is false, it means don't partition or ignore existing partitioning. + Layout bool `json:"layout"` + // Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + // Use with caution. Default is 'false'. + // +optional + Overwrite *bool `json:"overwrite,omitempty"` + // TableType specifies the type of partition table. The following are supported: + // 'mbr': default and setups a MS-DOS partition table + // 'gpt': setups a GPT partition table + // +optional + TableType *string `json:"tableType,omitempty"` +} + +// Filesystem defines the file systems to be created. +type Filesystem struct { + // Device specifies the device name + Device string `json:"device"` + // Filesystem specifies the file system type. + Filesystem string `json:"filesystem"` + // Label specifies the file system label to be used. If set to None, no label is used. + Label string `json:"label"` + // Partition specifies the partition to use. The valid options are: "auto|any", "auto", "any", "none", and <NUM>, where NUM is the actual partition number. + // +optional + Partition *string `json:"partition,omitempty"` + // Overwrite defines whether or not to overwrite any existing filesystem. + // If true, any pre-existing file system will be destroyed. Use with Caution. + // +optional + Overwrite *bool `json:"overwrite,omitempty"` + // ExtraOpts defines extra options to add to the command for creating the file system.
+ // +optional + ExtraOpts []string `json:"extraOpts,omitempty"` +} + +// MountPoints defines input for generated mounts in cloud-init. +type MountPoints []string diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index ee6746cb0d..fad9601e68 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -25,6 +25,26 @@ import ( "sigs.k8s.io/cluster-api/api/v1beta1" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerdOptions) DeepCopyInto(out *ContainerdOptions) { + *out = *in + if in.BaseRuntimeSpec != nil { + in, out := &in.BaseRuntimeSpec, &out.BaseRuntimeSpec + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerdOptions. +func (in *ContainerdOptions) DeepCopy() *ContainerdOptions { + if in == nil { + return nil + } + out := new(ContainerdOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskSetup) DeepCopyInto(out *DiskSetup) { *out = *in @@ -403,6 +423,71 @@ func (in *Filesystem) DeepCopy() *Filesystem { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceOptions) DeepCopyInto(out *InstanceOptions) { + *out = *in + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(LocalStorageOptions) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceOptions. 
+func (in *InstanceOptions) DeepCopy() *InstanceOptions { + if in == nil { + return nil + } + out := new(InstanceOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletOptions) DeepCopyInto(out *KubeletOptions) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletOptions. +func (in *KubeletOptions) DeepCopy() *KubeletOptions { + if in == nil { + return nil + } + out := new(KubeletOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalStorageOptions) DeepCopyInto(out *LocalStorageOptions) { + *out = *in + if in.DisabledMounts != nil { + in, out := &in.DisabledMounts, &out.DisabledMounts + *out = make([]DisabledMount, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalStorageOptions. +func (in *LocalStorageOptions) DeepCopy() *LocalStorageOptions { + if in == nil { + return nil + } + out := new(LocalStorageOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in MountPoints) DeepCopyInto(out *MountPoints) { { @@ -452,8 +537,8 @@ func (in *NodeadmConfig) DeepCopyInto(out *NodeadmConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfig. @@ -509,6 +594,68 @@ func (in *NodeadmConfigList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeadmConfigSpec) DeepCopyInto(out *NodeadmConfigSpec) { *out = *in + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletOptions) + (*in).DeepCopyInto(*out) + } + if in.Containerd != nil { + in, out := &in.Containerd, &out.Containerd + *out = new(ContainerdOptions) + (*in).DeepCopyInto(*out) + } + if in.Instance != nil { + in, out := &in.Instance, &out.Instance + *out = new(InstanceOptions) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[Feature]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PreBootstrapCommands != nil { + in, out := &in.PreBootstrapCommands, &out.PreBootstrapCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]File, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NTP != nil { + in, out := &in.NTP, &out.NTP + *out = new(NTP) + (*in).DeepCopyInto(*out) + } + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(DiskSetup) + (*in).DeepCopyInto(*out) + } + if 
in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]MountPoints, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(MountPoints, len(*in)) + copy(*out, *in) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigSpec. @@ -524,6 +671,18 @@ func (in *NodeadmConfigSpec) DeepCopy() *NodeadmConfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeadmConfigStatus) DeepCopyInto(out *NodeadmConfigStatus) { *out = *in + if in.DataSecretName != nil { + in, out := &in.DataSecretName, &out.DataSecretName + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigStatus. @@ -541,7 +700,7 @@ func (in *NodeadmConfigTemplate) DeepCopyInto(out *NodeadmConfigTemplate) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplate. @@ -597,7 +756,7 @@ func (in *NodeadmConfigTemplateList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeadmConfigTemplateResource) DeepCopyInto(out *NodeadmConfigTemplateResource) { *out = *in - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplateResource. 
@@ -613,7 +772,7 @@ func (in *NodeadmConfigTemplateResource) DeepCopy() *NodeadmConfigTemplateResour // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeadmConfigTemplateSpec) DeepCopyInto(out *NodeadmConfigTemplateSpec) { *out = *in - out.Template = in.Template + in.Template.DeepCopyInto(&out.Template) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeadmConfigTemplateSpec. diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml index 4d58f9ddcf..152b539b33 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml @@ -39,13 +39,385 @@ spec: spec: description: NodeadmConfigSpec defines the desired state of NodeadmConfig. properties: - foo: - description: Foo is an example field of NodeadmConfig. Edit nodeadmconfig_types.go - to remove/update - type: string + containerd: + description: Containerd contains options for containerd. + properties: + baseRuntimeSpec: + description: BaseRuntimeSpec is the OCI runtime specification + upon which all containers will be based. + type: object + x-kubernetes-preserve-unknown-fields: true + config: + description: Config is an inline containerd configuration TOML + that will be merged with the defaults. + type: string + type: object + diskSetup: + description: DiskSetup specifies options for the creation of partition + tables and file systems on devices. + properties: + filesystems: + description: Filesystems specifies the list of file systems to + setup. + items: + description: Filesystem defines the file systems to be created. + properties: + device: + description: Device specifies the device name + type: string + extraOpts: + description: ExtraOpts defined extra options to add to the + command for creating the file system. 
+ items: + type: string + type: array + filesystem: + description: Filesystem specifies the file system type. + type: string + label: + description: Label specifies the file system label to be + used. If set to None, no label is used. + type: string + overwrite: + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. + type: boolean + partition: + description: 'Partition specifies the partition to use. + The valid options are: "auto|any", "auto", "any", "none", + and , where NUM is the actual partition number.' + type: string + required: + - device + - filesystem + - label + type: object + type: array + partitions: + description: Partitions specifies the list of the partitions to + setup. + items: + description: Partition defines how to create and layout a partition. + properties: + device: + description: Device is the name of the device. + type: string + layout: + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. + type: boolean + overwrite: + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. + type: boolean + tableType: + description: |- + TableType specifies the tupe of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table + type: string + required: + - device + - layout + type: object + type: array + type: object + featureGates: + additionalProperties: + type: boolean + description: FeatureGates holds key-value pairs to enable or disable + application features. + type: object + files: + description: Files specifies extra files to be passed to user_data + upon creation. 
+ items: + description: File defines the input for generating write_files in + cloud-init. + properties: + append: + description: Append specifies whether to append Content to existing + file if Path exists. + type: boolean + content: + description: Content is the actual content of the file. + type: string + contentFrom: + description: ContentFrom is a referenced source of content to + populate the file. + properties: + secret: + description: Secret represents a secret that should populate + this file. + properties: + key: + description: Key is the key in the secret's data map + for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of the file contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the file, e.g. + "root:root". + type: string + path: + description: Path specifies the full path on disk where to store + the file. + type: string + permissions: + description: Permissions specifies the permissions to assign + to the file, e.g. "0640". + type: string + required: + - path + type: object + type: array + instance: + description: Instance contains options for the node's operating system + and devices. + properties: + localStorage: + description: LocalStorage contains options for configuring EC2 + instance stores. + properties: + disabledMounts: + description: |- + DisabledMounts is a list of directories that will not be mounted to LocalStorage. + By default, all mounts are enabled. + items: + description: DisabledMount specifies a directory that should + not be mounted onto local storage. + enum: + - Containerd + - PodLogs + type: string + type: array + mountPath: + description: |- + MountPath is the path where the filesystem will be mounted. 
+ Defaults to "/mnt/k8s-disks/". + type: string + strategy: + description: Strategy specifies how to handle an instance's + local storage devices. + enum: + - RAID0 + - RAID10 + - Mount + type: string + required: + - strategy + type: object + type: object + kubelet: + description: Kubelet contains options for kubelet. + properties: + config: + description: Config is a KubeletConfiguration that will be merged + with the defaults. + type: object + x-kubernetes-preserve-unknown-fields: true + flags: + description: Flags are command-line kubelet arguments that will + be appended to the defaults. + items: + type: string + type: array + type: object + mounts: + description: Mounts specifies a list of mount points to be setup. + items: + description: MountPoints defines input for generated mounts in cloud-init. + items: + type: string + type: array + type: array + ntp: + description: NTP specifies NTP configuration. + properties: + enabled: + description: Enabled specifies whether NTP should be enabled + type: boolean + servers: + description: Servers specifies which NTP servers to use + items: + type: string + type: array + type: object + preBootstrapCommands: + description: PreBootstrapCommands specifies extra commands to run + before bootstrapping nodes. + items: + type: string + type: array + users: + description: Users specifies extra users to add. + items: + description: User defines the input for a generated user in cloud-init. 
+ properties: + gecos: + description: Gecos specifies the gecos to use for the user + type: string + groups: + description: Groups specifies the additional groups for the + user + type: string + homeDir: + description: HomeDir specifies the home directory to use for + the user + type: string + inactive: + description: Inactive specifies whether to mark the user as + inactive + type: boolean + lockPassword: + description: LockPassword specifies if password login should + be disabled + type: boolean + name: + description: Name specifies the username + type: string + passwd: + description: Passwd specifies a hashed password for the user + type: string + passwdFrom: + description: PasswdFrom is a referenced source of passwd to + populate the passwd. + properties: + secret: + description: Secret represents a secret that should populate + this password. + properties: + key: + description: Key is the key in the secret's data map + for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + primaryGroup: + description: PrimaryGroup specifies the primary group for the + user + type: string + shell: + description: Shell specifies the user's shell + type: string + sshAuthorizedKeys: + description: SSHAuthorizedKeys specifies a list of ssh authorized + keys for the user + items: + type: string + type: array + sudo: + description: Sudo specifies a sudo role for the user + type: string + required: + - name + type: object + type: array type: object status: description: NodeadmConfigStatus defines the observed state of NodeadmConfig. + properties: + conditions: + description: Conditions defines current service state of the NodeadmConfig. + items: + description: Condition defines an observation of a Cluster API resource + operational state. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + dataSecretName: + description: DataSecretName is the name of the secret that stores + the bootstrap data script. + type: string + failureMessage: + description: FailureMessage will be set on non-retryable errors. + type: string + failureReason: + description: FailureReason will be set on non-retryable errors. 
+ type: string + observedGeneration: + description: ObservedGeneration is the latest generation observed + by the controller. + format: int64 + type: integer + ready: + description: Ready indicates the BootstrapData secret is ready to + be consumed. + type: boolean type: object type: object served: true diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml index de2254d544..30ee622704 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml @@ -51,10 +51,320 @@ spec: spec: description: NodeadmConfigSpec defines the desired state of NodeadmConfig. properties: - foo: - description: Foo is an example field of NodeadmConfig. Edit - nodeadmconfig_types.go to remove/update - type: string + containerd: + description: Containerd contains options for containerd. + properties: + baseRuntimeSpec: + description: BaseRuntimeSpec is the OCI runtime specification + upon which all containers will be based. + type: object + x-kubernetes-preserve-unknown-fields: true + config: + description: Config is an inline containerd configuration + TOML that will be merged with the defaults. + type: string + type: object + diskSetup: + description: DiskSetup specifies options for the creation + of partition tables and file systems on devices. + properties: + filesystems: + description: Filesystems specifies the list of file systems + to setup. + items: + description: Filesystem defines the file systems to + be created. + properties: + device: + description: Device specifies the device name + type: string + extraOpts: + description: ExtraOpts defined extra options to + add to the command for creating the file system. + items: + type: string + type: array + filesystem: + description: Filesystem specifies the file system + type. 
+ type: string + label: + description: Label specifies the file system label + to be used. If set to None, no label is used. + type: string + overwrite: + description: |- + Overwrite defines whether or not to overwrite any existing filesystem. + If true, any pre-existing file system will be destroyed. Use with Caution. + type: boolean + partition: + description: 'Partition specifies the partition + to use. The valid options are: "auto|any", "auto", + "any", "none", and , where NUM is the actual + partition number.' + type: string + required: + - device + - filesystem + - label + type: object + type: array + partitions: + description: Partitions specifies the list of the partitions + to setup. + items: + description: Partition defines how to create and layout + a partition. + properties: + device: + description: Device is the name of the device. + type: string + layout: + description: |- + Layout specifies the device layout. + If it is true, a single partition will be created for the entire device. + When layout is false, it means don't partition or ignore existing partitioning. + type: boolean + overwrite: + description: |- + Overwrite describes whether to skip checks and create the partition if a partition or filesystem is found on the device. + Use with caution. Default is 'false'. + type: boolean + tableType: + description: |- + TableType specifies the tupe of partition table. The following are supported: + 'mbr': default and setups a MS-DOS partition table + 'gpt': setups a GPT partition table + type: string + required: + - device + - layout + type: object + type: array + type: object + featureGates: + additionalProperties: + type: boolean + description: FeatureGates holds key-value pairs to enable + or disable application features. + type: object + files: + description: Files specifies extra files to be passed to user_data + upon creation. + items: + description: File defines the input for generating write_files + in cloud-init. 
+ properties: + append: + description: Append specifies whether to append Content + to existing file if Path exists. + type: boolean + content: + description: Content is the actual content of the file. + type: string + contentFrom: + description: ContentFrom is a referenced source of content + to populate the file. + properties: + secret: + description: Secret represents a secret that should + populate this file. + properties: + key: + description: Key is the key in the secret's + data map for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. + type: string + required: + - key + - name + type: object + required: + - secret + type: object + encoding: + description: Encoding specifies the encoding of the + file contents. + enum: + - base64 + - gzip + - gzip+base64 + type: string + owner: + description: Owner specifies the ownership of the file, + e.g. "root:root". + type: string + path: + description: Path specifies the full path on disk where + to store the file. + type: string + permissions: + description: Permissions specifies the permissions to + assign to the file, e.g. "0640". + type: string + required: + - path + type: object + type: array + instance: + description: Instance contains options for the node's operating + system and devices. + properties: + localStorage: + description: LocalStorage contains options for configuring + EC2 instance stores. + properties: + disabledMounts: + description: |- + DisabledMounts is a list of directories that will not be mounted to LocalStorage. + By default, all mounts are enabled. + items: + description: DisabledMount specifies a directory + that should not be mounted onto local storage. + enum: + - Containerd + - PodLogs + type: string + type: array + mountPath: + description: |- + MountPath is the path where the filesystem will be mounted. + Defaults to "/mnt/k8s-disks/". 
+ type: string + strategy: + description: Strategy specifies how to handle an instance's + local storage devices. + enum: + - RAID0 + - RAID10 + - Mount + type: string + required: + - strategy + type: object + type: object + kubelet: + description: Kubelet contains options for kubelet. + properties: + config: + description: Config is a KubeletConfiguration that will + be merged with the defaults. + type: object + x-kubernetes-preserve-unknown-fields: true + flags: + description: Flags are command-line kubelet arguments + that will be appended to the defaults. + items: + type: string + type: array + type: object + mounts: + description: Mounts specifies a list of mount points to be + setup. + items: + description: MountPoints defines input for generated mounts + in cloud-init. + items: + type: string + type: array + type: array + ntp: + description: NTP specifies NTP configuration. + properties: + enabled: + description: Enabled specifies whether NTP should be enabled + type: boolean + servers: + description: Servers specifies which NTP servers to use + items: + type: string + type: array + type: object + preBootstrapCommands: + description: PreBootstrapCommands specifies extra commands + to run before bootstrapping nodes. + items: + type: string + type: array + users: + description: Users specifies extra users to add. + items: + description: User defines the input for a generated user + in cloud-init. 
+ properties: + gecos: + description: Gecos specifies the gecos to use for the + user + type: string + groups: + description: Groups specifies the additional groups + for the user + type: string + homeDir: + description: HomeDir specifies the home directory to + use for the user + type: string + inactive: + description: Inactive specifies whether to mark the + user as inactive + type: boolean + lockPassword: + description: LockPassword specifies if password login + should be disabled + type: boolean + name: + description: Name specifies the username + type: string + passwd: + description: Passwd specifies a hashed password for + the user + type: string + passwdFrom: + description: PasswdFrom is a referenced source of passwd + to populate the passwd. + properties: + secret: + description: Secret represents a secret that should + populate this password. + properties: + key: + description: Key is the key in the secret's + data map for this value. + type: string + name: + description: Name of the secret in the KubeadmBootstrapConfig's + namespace to use. 
+ type: string + required: + - key + - name + type: object + required: + - secret + type: object + primaryGroup: + description: PrimaryGroup specifies the primary group + for the user + type: string + shell: + description: Shell specifies the user's shell + type: string + sshAuthorizedKeys: + description: SSHAuthorizedKeys specifies a list of ssh + authorized keys for the user + items: + type: string + type: array + sudo: + description: Sudo specifies a sudo role for the user + type: string + required: + - name + type: object + type: array type: object type: object required: From 7e108ee47879c63b980f541d700bcf80b06b4538 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 12 Sep 2025 13:19:39 -0700 Subject: [PATCH 04/18] refactor: use a common file resolver so nodeadmconfig can use it --- .../eks/controllers/eksconfig_controller.go | 39 +------------ bootstrap/eks/controllers/file_resolver.go | 56 +++++++++++++++++++ 2 files changed, 58 insertions(+), 37 deletions(-) create mode 100644 bootstrap/eks/controllers/file_resolver.go diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index ca55199a6b..f14e0b928b 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -27,7 +27,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -146,41 +145,6 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, r.joinWorker(ctx, cluster, config, configOwner) } -func (r *EKSConfigReconciler) resolveFiles(ctx context.Context, cfg *eksbootstrapv1.EKSConfig) ([]eksbootstrapv1.File, error) { - collected := make([]eksbootstrapv1.File, 0, len(cfg.Spec.Files)) - - for i := range cfg.Spec.Files { - in := cfg.Spec.Files[i] - if 
in.ContentFrom != nil { - data, err := r.resolveSecretFileContent(ctx, cfg.Namespace, in) - if err != nil { - return nil, errors.Wrapf(err, "failed to resolve file source") - } - in.ContentFrom = nil - in.Content = string(data) - } - collected = append(collected, in) - } - - return collected, nil -} - -func (r *EKSConfigReconciler) resolveSecretFileContent(ctx context.Context, ns string, source eksbootstrapv1.File) ([]byte, error) { - secret := &corev1.Secret{} - key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name} - if err := r.Client.Get(ctx, key, secret); err != nil { - if apierrors.IsNotFound(err) { - return nil, errors.Wrapf(err, "secret not found: %s", key) - } - return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key) - } - data, ok := secret.Data[source.ContentFrom.Secret.Key] - if !ok { - return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key) - } - return data, nil -} - func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.EKSConfig, configOwner *bsutil.ConfigOwner) error { log := logger.FromContext(ctx) @@ -227,7 +191,8 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 } log.Info("Generating userdata") - files, err := r.resolveFiles(ctx, config) + fileResolver := FileResolver{Client: r.Client} + files, err := fileResolver.ResolveFiles(ctx, config.Namespace, config.Spec.Files) if err != nil { log.Info("Failed to resolve files for user data") conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) diff --git a/bootstrap/eks/controllers/file_resolver.go b/bootstrap/eks/controllers/file_resolver.go new file mode 100644 index 0000000000..2235d8fb64 --- /dev/null +++ b/bootstrap/eks/controllers/file_resolver.go @@ -0,0 +1,56 @@ +package controllers + +import ( + 
"context" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta2" +) + +// FileResolver provides methods to resolve files and their content from secrets. +type FileResolver struct { + Client client.Reader +} + +// ResolveFiles resolves the content of files, fetching data from referenced secrets if needed. +func (fr *FileResolver) ResolveFiles(ctx context.Context, namespace string, files []eksbootstrapv1.File) ([]eksbootstrapv1.File, error) { + collected := make([]eksbootstrapv1.File, 0, len(files)) + + for i := range files { + in := files[i] + if in.ContentFrom != nil { + data, err := fr.ResolveSecretFileContent(ctx, namespace, in) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve file source") + } + in.ContentFrom = nil + in.Content = string(data) + } + collected = append(collected, in) + } + + return collected, nil +} + +// ResolveSecretFileContent fetches the content of a file from a referenced secret. 
+func (fr *FileResolver) ResolveSecretFileContent(ctx context.Context, ns string, source eksbootstrapv1.File) ([]byte, error) { + secret := &corev1.Secret{} + key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name} + if err := fr.Client.Get(ctx, key, secret); err != nil { + if apierrors.IsNotFound(err) { + return nil, errors.Wrapf(err, "secret not found: %s", key) + } + return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key) + } + data, ok := secret.Data[source.ContentFrom.Secret.Key] + if !ok { + return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key) + } + return data, nil +} From af310a22cf0dae7a207bf0bbb05d433658fabbf9 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 12 Sep 2025 13:19:58 -0700 Subject: [PATCH 05/18] feat: implements nodeadm config controller --- .../controllers/nodeadmconfig_controller.go | 384 ++++++++++++- bootstrap/eks/internal/userdata/nodeadm.go | 336 +++++++++++ .../eks/internal/userdata/nodeadm_test.go | 538 ++++++++++++++++++ 3 files changed, 1238 insertions(+), 20 deletions(-) create mode 100644 bootstrap/eks/internal/userdata/nodeadm.go create mode 100644 bootstrap/eks/internal/userdata/nodeadm_test.go diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index 45b416997b..17e73c3634 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -1,47 +1,391 @@ package controllers import ( + "bytes" "context" + "time" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bsutil "sigs.k8s.io/cluster-api/bootstrap/util" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + 
"sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" ) // NodeadmConfigReconciler reconciles a NodeadmConfig object type NodeadmConfigReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + WatchFilterValue string } // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=nodeadmconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machinepools;clusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete; +func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) { + log := logger.FromContext(ctx) -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// 
move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the NodeadmConfig object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile -func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = logf.FromContext(ctx) + // get NodeadmConfig + config := &eksbootstrapv1.NodeadmConfig{} + if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "Failed to get config") + return ctrl.Result{}, err + } + log = log.WithValues("NodeadmConfig", config.GetName()) - // TODO(user): your logic here + // check owner references and look up owning Machine object + configOwner, err := bsutil.GetTypedConfigOwner(ctx, r.Client, config) + if apierrors.IsNotFound(err) { + // no error here, requeue until we find an owner + log.Debug("NodeadmConfig failed to look up owner reference, re-queueing") + return ctrl.Result{RequeueAfter: time.Minute}, nil + } + if err != nil { + log.Error(err, "NodeadmConfig failed to get owner") + return ctrl.Result{}, err + } + if configOwner == nil { + // no error, requeue until we find an owner + log.Debug("NodeadmConfig has no owner reference set, re-queueing") + return ctrl.Result{RequeueAfter: time.Minute}, nil + } - return ctrl.Result{}, nil + log = log.WithValues(configOwner.GetKind(), configOwner.GetName()) + + cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) + if err != nil { + if errors.Is(err, util.ErrNoCluster) { + log.Info("NodeadmConfig does not belong to a cluster yet, re-queuing until it's part of a cluster") + return 
ctrl.Result{RequeueAfter: time.Minute}, nil + } + if apierrors.IsNotFound(err) { + log.Info("Cluster does not exist yet, re-queueing until it is created") + return ctrl.Result{RequeueAfter: time.Minute}, nil + } + log.Error(err, "Could not get cluster with metadata") + return ctrl.Result{}, err + } + log = log.WithValues("cluster", klog.KObj(cluster)) + + if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, config); err != nil || isPaused || conditionChanged { + return ctrl.Result{}, err + } + + patchHelper, err := patch.NewHelper(config, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // set up defer block for updating config + defer func() { + conditions.SetSummary(config, + conditions.WithConditions( + eksbootstrapv1.DataSecretAvailableCondition, + ), + conditions.WithStepCounter(), + ) + + patchOpts := []patch.Option{} + if rerr == nil { + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + } + if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil { + log.Error(rerr, "Failed to patch config") + if rerr == nil { + rerr = err + } + } + }() + + return ctrl.Result{}, r.joinWorker(ctx, cluster, config, configOwner) +} + +func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.NodeadmConfig, configOwner *bsutil.ConfigOwner) error { + log := logger.FromContext(ctx) + + // only need to reconcile the secret for Machine kinds once, but MachinePools need updates for new launch templates + if config.Status.DataSecretName != nil && configOwner.GetKind() == "Machine" { + secretKey := client.ObjectKey{Namespace: config.Namespace, Name: *config.Status.DataSecretName} + log = log.WithValues("data-secret-name", secretKey.Name) + existingSecret := &corev1.Secret{} + + // No error here means the Secret exists and we have no + // reason to proceed. 
+ err := r.Client.Get(ctx, secretKey, existingSecret) + switch { + case err == nil: + return nil + case !apierrors.IsNotFound(err): + log.Error(err, "unable to check for existing bootstrap secret") + return err + } + } + + if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { + return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider") + } + + if !cluster.Status.InfrastructureReady { + log.Info("Cluster infrastructure is not ready") + conditions.MarkFalse(config, + eksbootstrapv1.DataSecretAvailableCondition, + eksbootstrapv1.WaitingForClusterInfrastructureReason, + clusterv1.ConditionSeverityInfo, "") + return nil + } + + if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + log.Info("Control Plane has not yet been initialized") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "") + return nil + } + + controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} + if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil { + return err + } + + log.Info("Generating userdata") + fileResolver := FileResolver{Client: r.Client} + files, err := fileResolver.ResolveFiles(ctx, config.Namespace, config.Spec.Files) + if err != nil { + log.Info("Failed to resolve files for user data") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + return err + } + + nodeInput := &userdata.NodeadmInput{ + // AWSManagedControlPlane webhooks default and validate EKSClusterName + ClusterName: controlPlane.Spec.EKSClusterName, + KubeletFlags: config.Spec.Kubelet.Flags, + FeatureGates: 
config.Spec.FeatureGates, + Instance: config.Spec.Instance, + ContainerdConfig: config.Spec.Containerd.Config, + ContainerdBaseRuntimeSpec: config.Spec.Containerd.BaseRuntimeSpec, + PreBootstrapCommands: config.Spec.PreBootstrapCommands, + Users: config.Spec.Users, + NTP: config.Spec.NTP, + DiskSetup: config.Spec.DiskSetup, + Mounts: config.Spec.Mounts, + Files: files, + } + + // generate userdata + userDataScript, err := userdata.NewNodeadmUserdata(nodeInput) + if err != nil { + log.Error(err, "Failed to create a worker join configuration") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + return err + } + + // store userdata as secret + if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil { + log.Error(err, "Failed to store bootstrap data") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + return err + } + + return nil +} + +// storeBootstrapData creates a new secret with the data passed in as input, +// sets the reference in the configuration status and ready to true. 
+func (r *NodeadmConfigReconciler) storeBootstrapData(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.NodeadmConfig, data []byte) error {
+	log := logger.FromContext(ctx)
+
+	// as secret creation and scope.Config status patch are not atomic operations
+	// it is possible that secret creation happens but the config.Status patches are not applied
+	secret := &corev1.Secret{}
+	if err := r.Client.Get(ctx, client.ObjectKey{
+		Name:      config.Name,
+		Namespace: config.Namespace,
+	}, secret); err != nil {
+		if apierrors.IsNotFound(err) {
+			if secret, err = r.createBootstrapSecret(ctx, cluster, config, data); err != nil {
+				return errors.Wrap(err, "failed to create bootstrap data secret for NodeadmConfig")
+			}
+			log.Info("created bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret))
+		} else {
+			return errors.Wrap(err, "failed to get data secret for NodeadmConfig")
+		}
+	} else {
+		updated, err := r.updateBootstrapSecret(ctx, secret, data)
+		if err != nil {
+			return errors.Wrap(err, "failed to update data secret for NodeadmConfig")
+		}
+		if updated {
+			log.Info("updated bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret))
+		} else {
+			log.Trace("no change in bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret))
+		}
+	}
+
+	config.Status.DataSecretName = ptr.To[string](secret.Name)
+	config.Status.Ready = true
+	conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
+	return nil
+}
+
+func (r *NodeadmConfigReconciler) createBootstrapSecret(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.NodeadmConfig, data []byte) (*corev1.Secret, error) {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      config.Name,
+			Namespace: config.Namespace,
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: cluster.Name,
+			},
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: eksbootstrapv1.GroupVersion.String(),
+					Kind:       "NodeadmConfig",
+					Name:       config.Name,
+					UID: 
config.UID, + Controller: ptr.To[bool](true), + }, + }, + }, + Data: map[string][]byte{ + "value": data, + }, + Type: clusterv1.ClusterSecretType, + } + return secret, r.Client.Create(ctx, secret) +} + +// Update the userdata in the bootstrap Secret. +func (r *NodeadmConfigReconciler) updateBootstrapSecret(ctx context.Context, secret *corev1.Secret, data []byte) (bool, error) { + if secret.Data == nil { + secret.Data = make(map[string][]byte) + } + if !bytes.Equal(secret.Data["value"], data) { + secret.Data["value"] = data + return true, r.Client.Update(ctx, secret) + } + return false, nil } // SetupWithManager sets up the controller with the Manager. -func (r *NodeadmConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). +func (r *NodeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, option controller.Options) error { + b := ctrl.NewControllerManagedBy(mgr). For(&eksbootstrapv1.NodeadmConfig{}). - Named("nodeadmconfig"). - Complete(r) + WithOptions(option). + WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)). 
+		Watches(
+			&clusterv1.Machine{},
+			handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
+		)
+
+	if feature.Gates.Enabled(feature.MachinePool) {
+		b = b.Watches(
+			&expclusterv1.MachinePool{},
+			handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
+		)
+	}
+
+	c, err := b.Build(r)
+	if err != nil {
+		return errors.Wrap(err, "failed setting up with a controller manager")
+	}
+
+	err = c.Watch(
+		source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
+			handler.EnqueueRequestsFromMapFunc(r.ClusterToNodeadmConfigs),
+			predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())),
+	)
+	if err != nil {
+		return errors.Wrap(err, "failed adding watch for Clusters to controller manager")
+	}
+	return nil
+}
+
+// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue requests
+// for NodeadmConfig reconciliation.
+func (r *NodeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
+	result := []ctrl.Request{}
+
+	m, ok := o.(*clusterv1.Machine)
+	if !ok {
+		klog.Errorf("Expected a Machine but got a %T", o)
+		// Return early: dereferencing m below would panic on a nil pointer.
+		return result
+	}
+	if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig") {
+		name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
+		result = append(result, ctrl.Request{NamespacedName: name})
+	}
+	return result
+}
+
+// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue requests
+// for NodeadmConfig reconciliation.
+func (r *NodeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
+	result := []ctrl.Request{}
+
+	m, ok := o.(*expclusterv1.MachinePool)
+	if !ok {
+		klog.Errorf("Expected a MachinePool but got a %T", o)
+		// Return early: dereferencing m below would panic on a nil pointer.
+		return result
+	}
+	configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
+	if configRef != nil && configRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig").GroupKind() {
+		name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name}
+		result = append(result, ctrl.Request{NamespacedName: name})
+	}
+
+	return result
+}
+
+// ClusterToNodeadmConfigs is a handler.ToRequestsFunc to be used to enqueue requests for
+// NodeadmConfig reconciliation.
+func (r *NodeadmConfigReconciler) ClusterToNodeadmConfigs(ctx context.Context, o client.Object) []ctrl.Request {
+	result := []ctrl.Request{}
+
+	c, ok := o.(*clusterv1.Cluster)
+	if !ok {
+		klog.Errorf("Expected a Cluster but got a %T", o)
+		// Return early: dereferencing c below would panic on a nil pointer.
+		return result
+	}
+
+	selectors := []client.ListOption{
+		client.InNamespace(c.Namespace),
+		client.MatchingLabels{
+			clusterv1.ClusterNameLabel: c.Name,
+		},
+	}
+
+	machineList := &clusterv1.MachineList{}
+	// Use the caller-supplied context rather than context.Background() so
+	// cancellation propagates to the List call.
+	if err := r.Client.List(ctx, machineList, selectors...); err != nil {
+		return nil
+	}
+
+	for _, m := range machineList.Items {
+		if m.Spec.Bootstrap.ConfigRef != nil &&
+			m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("NodeadmConfig").GroupKind() {
+			name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
+			result = append(result, ctrl.Request{NamespacedName: name})
+		}
+	}
+
+	return result
+}
diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go
new file mode 100644
index 0000000000..197faac6c8
--- /dev/null
+++ b/bootstrap/eks/internal/userdata/nodeadm.go
@@ -0,0 +1,336 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package userdata + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" +) + +const ( + boundary = "//" + + cloudInitUserData = ` +--{{.Boundary}} +Content-Type: text/cloud-config +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.yaml" + +#cloud-config +{{- if .Files }} +{{template "files" .Files}} +{{- end }} +runcmd: +{{- template "ntp" .NTP }} +{{- template "users" .Users }} +{{- template "disk_setup" .DiskSetup}} +{{- template "fs_setup" .DiskSetup}} +{{- template "mounts" .Mounts}} +--{{.Boundary}}` + + // Shell script part template for nodeadm. + shellScriptPartTemplate = ` +--{{.Boundary}} +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="commands.sh" + +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +{{- range .PreBootstrapCommands}} +{{.}} +{{- end}} +--{{ .Boundary }}` + + // Node config part template for nodeadm. 
+ nodeConfigPartTemplate = ` +--{{.Boundary}} +Content-Type: application/node.eks.aws + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: {{.ClusterName}} + apiServerEndpoint: {{.APIServerEndpoint}} + certificateAuthority: {{.CACert}} + cidr: {{if .ServiceCIDR}}{{.ServiceCIDR}}{{else}}172.20.0.0/16{{end}} + {{- if .FeatureGates }} + featureGates: + {{- range $k, $v := .FeatureGates }} + {{$k}}: {{$v}} + {{- end }} + {{- end }} + kubelet: + {{- if .KubeletConfig }} + config: | +{{ Indent 6 .KubeletConfigRaw }} + {{- end }} + flags: + {{- range $flag := .KubeletFlags }} + - "{{$flag}}" + {{- end }} + {{- if or .ContainerdConfig .ContainerdBaseRuntimeSpec }} + containerd: + {{- if .ContainerdConfig }} + config: | +{{ Indent 6 .ContainerdConfig }} + {{- end }} + {{- if .ContainerdBaseRuntimeSpec }} + baseRuntimeSpec: | +{{ Indent 6 .ContainerdBaseRuntimeSpecRaw }} + {{- end }} + {{- end }} + {{- if .Instance }} + instance: + {{- if .Instance.LocalStorage }} + localStorage: + strategy: {{ .Instance.LocalStorage.Strategy }} + {{- with .Instance.LocalStorage.MountPath }} + mountPath: {{ . }} + {{- end }} + {{- with .Instance.LocalStorage.DisabledMounts }} + disabledMounts: + {{- range . }} + - {{ . }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +--{{.Boundary}}` + + nodeLabelImage = "eks.amazonaws.com/nodegroup-image" + nodeLabelNodeGroup = "eks.amazonaws.com/nodegroup" + nodeLabelCapacityType = "eks.amazonaws.com/capacityType" +) + +// NodeadmInput contains all the information required to generate user data for a node. 
+type NodeadmInput struct { + ClusterName string + KubeletFlags []string + KubeletConfig *runtime.RawExtension + ContainerdConfig string + ContainerdBaseRuntimeSpec *runtime.RawExtension + FeatureGates map[eksbootstrapv1.Feature]bool + Instance *eksbootstrapv1.InstanceOptions + + PreBootstrapCommands []string + Files []eksbootstrapv1.File + DiskSetup *eksbootstrapv1.DiskSetup + Mounts []eksbootstrapv1.MountPoints + Users []eksbootstrapv1.User + NTP *eksbootstrapv1.NTP + + AMIImageID string + APIServerEndpoint string + Boundary string + CACert string + CapacityType *v1beta2.ManagedMachinePoolCapacityType + ServiceCIDR string // Service CIDR range for the cluster + ClusterDNS string + NodeGroupName string + NodeLabels string // Not exposed in CRD, computed from user input +} + +func (input *NodeadmInput) setKubeletFlags() { + var nodeLabels string + newFlags := []string{} + for _, flag := range input.KubeletFlags { + if strings.HasPrefix(flag, "--node-labels=") { + nodeLabels = strings.TrimPrefix(flag, "--node-labels=") + } else { + newFlags = append(newFlags, flag) + } + } + labelsMap := make(map[string]string) + if nodeLabels != "" { + labels := strings.Split(nodeLabels, ",") + for _, label := range labels { + labelSplit := strings.Split(label, "=") + labelKey := labelSplit[0] + labelValue := labelSplit[1] + labelsMap[labelKey] = labelValue + } + } + if _, ok := labelsMap[nodeLabelImage]; !ok && input.AMIImageID != "" { + labelsMap[nodeLabelImage] = input.AMIImageID + } + if _, ok := labelsMap[nodeLabelNodeGroup]; !ok && input.NodeGroupName != "" { + labelsMap[nodeLabelNodeGroup] = input.NodeGroupName + } + if _, ok := labelsMap[nodeLabelCapacityType]; !ok { + labelsMap[nodeLabelCapacityType] = input.getCapacityTypeString() + } + stringBuilder := strings.Builder{} + for key, value := range labelsMap { + stringBuilder.WriteString(fmt.Sprintf("%s=%s,", key, value)) + } + newLabels := stringBuilder.String()[:len(stringBuilder.String())-1] // remove the last comma + 
newFlags = append(newFlags, fmt.Sprintf("--node-labels=%s", newLabels)) + input.KubeletFlags = newFlags +} + +// getCapacityTypeString returns the string representation of the capacity type. +func (ni *NodeadmInput) getCapacityTypeString() string { + if ni.CapacityType == nil { + return "ON_DEMAND" + } + switch *ni.CapacityType { + case v1beta2.ManagedMachinePoolCapacityTypeSpot: + return "SPOT" + case v1beta2.ManagedMachinePoolCapacityTypeOnDemand: + return "ON_DEMAND" + default: + return strings.ToUpper(string(*ni.CapacityType)) + } +} + +// ContainerdBaseRuntimeSpecRaw returns the raw JSON/YAML for inclusion into templates. +func (input *NodeadmInput) ContainerdBaseRuntimeSpecRaw() string { + if input.ContainerdBaseRuntimeSpec == nil { + return "" + } + if len(input.ContainerdBaseRuntimeSpec.Raw) == 0 { + return "" + } + return string(input.ContainerdBaseRuntimeSpec.Raw) +} + +// KubeletConfigRaw returns the raw JSON/YAML for inclusion into templates. +func (input *NodeadmInput) KubeletConfigRaw() string { + if input.KubeletConfig == nil { + return "" + } + if len(input.KubeletConfig.Raw) == 0 { + return "" + } + return string(input.KubeletConfig.Raw) +} + +// validateNodeInput validates the input for nodeadm user data generation. +func validateNodeadmInput(input *NodeadmInput) error { + if input.APIServerEndpoint == "" { + return fmt.Errorf("API server endpoint is required for nodeadm") + } + if input.CACert == "" { + return fmt.Errorf("CA certificate is required for nodeadm") + } + if input.ClusterName == "" { + return fmt.Errorf("cluster name is required for nodeadm") + } + if input.NodeGroupName == "" { + return fmt.Errorf("node group name is required for nodeadm") + } + + if input.Boundary == "" { + input.Boundary = boundary + } + input.setKubeletFlags() + + klog.V(2).Infof("Nodeadm Userdata Generation - node-labels: %s", input.NodeLabels) + + return nil +} + +// NewNode returns the user data string to be used on a node instance. 
+func NewNodeadmUserdata(input *NodeadmInput) ([]byte, error) { + if err := validateNodeadmInput(input); err != nil { + return nil, err + } + + var buf bytes.Buffer + + // Write MIME header + if _, err := buf.WriteString(fmt.Sprintf("MIME-Version: 1.0\nContent-Type: multipart/mixed; boundary=%q\n\n", input.Boundary)); err != nil { + return nil, fmt.Errorf("failed to write MIME header: %v", err) + } + + // Write shell script part if needed + if len(input.PreBootstrapCommands) > 0 { + shellScriptTemplate := template.Must(template.New("shell").Parse(shellScriptPartTemplate)) + if err := shellScriptTemplate.Execute(&buf, input); err != nil { + return nil, fmt.Errorf("failed to execute shell script template: %v", err) + } + if _, err := buf.WriteString("\n"); err != nil { + return nil, fmt.Errorf("failed to write newline: %v", err) + } + } + + // Write node config part + nodeConfigTemplate := template.Must( + template.New("node"). + Funcs(defaultTemplateFuncMap). + Parse(nodeConfigPartTemplate), + ) + if err := nodeConfigTemplate.Execute(&buf, input); err != nil { + return nil, fmt.Errorf("failed to execute node config template: %v", err) + } + + // Write cloud-config part + tm := template.New("Node").Funcs(defaultTemplateFuncMap) + // if any of the input fields are set, we need to write the cloud-config part + if input.NTP != nil || input.DiskSetup != nil || input.Mounts != nil || input.Users != nil || input.Files != nil { + if _, err := tm.Parse(filesTemplate); err != nil { + return nil, fmt.Errorf("failed to parse args template: %w", err) + } + if _, err := tm.Parse(ntpTemplate); err != nil { + return nil, fmt.Errorf("failed to parse ntp template: %w", err) + } + + if _, err := tm.Parse(usersTemplate); err != nil { + return nil, fmt.Errorf("failed to parse users template: %w", err) + } + + if _, err := tm.Parse(diskSetupTemplate); err != nil { + return nil, fmt.Errorf("failed to parse disk setup template: %w", err) + } + + if _, err := tm.Parse(fsSetupTemplate); err 
!= nil { + return nil, fmt.Errorf("failed to parse fs setup template: %w", err) + } + + if _, err := tm.Parse(mountsTemplate); err != nil { + return nil, fmt.Errorf("failed to parse mounts template: %w", err) + } + + t, err := tm.Parse(cloudInitUserData) + if err != nil { + return nil, fmt.Errorf("failed to parse Node template: %w", err) + } + + if err := t.Execute(&buf, input); err != nil { + return nil, fmt.Errorf("failed to execute node user data template: %w", err) + } + } + // write the final boundry closing, all of the ones in the script use intermediate boundries + buf.Write([]byte("--")) + return buf.Bytes(), nil + +} diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go new file mode 100644 index 0000000000..b63919766b --- /dev/null +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -0,0 +1,538 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package userdata + +import ( + "fmt" + "strings" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "k8s.io/utils/ptr" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" +) + +func TestAddDefaultNodeLabelsToKubeletFlags(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + in *NodeadmInput + wantNodeLabels []string + wantOtherFlags []string + }{ + { + name: "empty kubelet flags", + in: &NodeadmInput{}, + wantNodeLabels: []string{"eks.amazonaws.com/capacityType=ON_DEMAND"}, + wantOtherFlags: nil, + }, + { + name: "unrelated kubelet flag preserved", + in: &NodeadmInput{ + KubeletFlags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}, + }, + wantNodeLabels: []string{"eks.amazonaws.com/capacityType=ON_DEMAND"}, + wantOtherFlags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}, + }, + { + name: "existing node-labels augmented", + in: &NodeadmInput{ + KubeletFlags: []string{"--node-labels=app=foo"}, + AMIImageID: "ami-12345", + NodeGroupName: "ng-1", + }, + wantNodeLabels: []string{ + "app=foo", + "eks.amazonaws.com/nodegroup-image=ami-12345", + "eks.amazonaws.com/nodegroup=ng-1", + "eks.amazonaws.com/capacityType=ON_DEMAND", + }, + wantOtherFlags: nil, + }, + { + name: "existing eks-specific labels present", + in: &NodeadmInput{ + KubeletFlags: []string{"--node-labels=app=foo,eks.amazonaws.com/nodegroup=ng-1,eks.amazonaws.com/nodegroup-image=ami-12345,eks.amazonaws.com/capacityType=SPOT"}, + AMIImageID: "ami-12345", + NodeGroupName: "ng-1", + }, + wantNodeLabels: []string{ + "app=foo", + "eks.amazonaws.com/nodegroup=ng-1", + "eks.amazonaws.com/nodegroup-image=ami-12345", + "eks.amazonaws.com/capacityType=SPOT", + }, + wantOtherFlags: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.in.setKubeletFlags() + var gotNodeLabels []string + var gotOtherFlags []string + for _, flag := range tt.in.KubeletFlags { + if 
strings.HasPrefix(flag, "--node-labels=") { + labels := strings.TrimPrefix(flag, "--node-labels=") + gotNodeLabels = append(gotNodeLabels, strings.Split(labels, ",")...) + } else { + gotOtherFlags = append(gotOtherFlags, flag) + } + } + g.Expect(gotNodeLabels).To(ContainElements(tt.wantNodeLabels), "expected node-labels to contain %v, got %v", tt.wantNodeLabels, gotNodeLabels) + g.Expect(gotOtherFlags).To(ContainElements(tt.wantOtherFlags), "expected kubelet flags to contain %v, got %v", tt.wantOtherFlags, gotOtherFlags) + }) + } +} + +func TestNodeadmUserdata(t *testing.T) { + format.TruncatedDiff = false + g := NewWithT(t) + + type args struct { + input *NodeadmInput + } + + onDemandCapacity := v1beta2.ManagedMachinePoolCapacityTypeOnDemand + spotCapacity := v1beta2.ManagedMachinePoolCapacityTypeSpot + + tests := []struct { + name string + args args + expectErr bool + verifyOutput func(output string) bool + }{ + { + name: "basic nodeadm userdata", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "MIME-Version: 1.0") && + strings.Contains(output, "name: test-cluster") && + strings.Contains(output, "apiServerEndpoint: https://example.com") && + strings.Contains(output, "certificateAuthority: test-ca-cert") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "with kubelet flags", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + KubeletFlags: []string{ + "--node-labels=node-role.undistro.io/infra=true", + "--register-with-taints=dedicated=infra:NoSchedule", + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, 
"node-role.undistro.io/infra=true") && + strings.Contains(output, "register-with-taints") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "with pre bootstrap commands", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + PreBootstrapCommands: []string{ + "echo 'pre-bootstrap'", + "yum install -y htop", + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "echo 'pre-bootstrap'") && + strings.Contains(output, "yum install -y htop") && + strings.Contains(output, "#!/bin/bash") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "with custom AMI", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://test-endpoint.eks.amazonaws.com", + CACert: "test-cert", + NodeGroupName: "test-nodegroup", + AMIImageID: "ami-123456", + ServiceCIDR: "192.168.0.0/16", + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "cidr: 192.168.0.0/16") && + strings.Contains(output, "nodegroup-image=ami-123456") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "cloud-config part when NTP is set", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + NTP: &eksbootstrapv1.NTP{ + Enabled: ptr.To(true), + Servers: []string{"time.google.com"}, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Contains(output, "#cloud-config") && + strings.Contains(output, "time.google.com") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "cloud-config part 
when Users is set", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + Users: []eksbootstrapv1.User{ + { + Name: "testuser", + SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3..."}, + }, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Contains(output, "#cloud-config") && + strings.Contains(output, "testuser") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "cloud-config part when DiskSetup is set", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + DiskSetup: &eksbootstrapv1.DiskSetup{ + Filesystems: []eksbootstrapv1.Filesystem{ + { + Device: "/dev/disk/azure/scsi1/lun0", + Filesystem: "ext4", + Label: "etcd_disk", + }, + }, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Contains(output, "#cloud-config") && + strings.Contains(output, "/dev/disk/azure/scsi1/lun0") && + strings.Contains(output, "ext4") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "cloud-config part when Mounts is set", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + Mounts: []eksbootstrapv1.MountPoints{ + eksbootstrapv1.MountPoints{"/dev/disk/azure/scsi1/lun0"}, + eksbootstrapv1.MountPoints{"/mnt/etcd"}, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Contains(output, "#cloud-config") && + strings.Contains(output, 
"/dev/disk/azure/scsi1/lun0") && + strings.Contains(output, "/mnt/etcd") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "boundary verification - all three parts with custom boundary", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + Boundary: "CUSTOMBOUNDARY123", + PreBootstrapCommands: []string{"echo 'pre-bootstrap'"}, + NTP: &eksbootstrapv1.NTP{ + Enabled: ptr.To(true), + Servers: []string{"time.google.com"}, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + boundary := "CUSTOMBOUNDARY123" + return strings.Contains(output, fmt.Sprintf(`boundary="%s"`, boundary)) && + strings.Contains(output, fmt.Sprintf("--%s", boundary)) && + strings.Contains(output, fmt.Sprintf("--%s--", boundary)) && + strings.Contains(output, "Content-Type: application/node.eks.aws") && + strings.Contains(output, "Content-Type: text/x-shellscript") && + strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Count(output, fmt.Sprintf("--%s", boundary)) == 6 // 3 parts * 2 boundaries each + }, + }, + { + name: "boundary verification - only node config part with default boundary", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + boundary := "//" // default boundary + return strings.Contains(output, fmt.Sprintf(`boundary="%s"`, boundary)) && + strings.Contains(output, fmt.Sprintf("--%s", boundary)) && + strings.Contains(output, fmt.Sprintf("--%s--", boundary)) && + strings.Contains(output, "Content-Type: application/node.eks.aws") && + !strings.Contains(output, "Content-Type: text/x-shellscript") && + !strings.Contains(output, "Content-Type: text/cloud-config") + }, + }, + { + name: 
"boundary verification - shell script and node config parts only", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + PreBootstrapCommands: []string{"echo 'test'"}, + NTP: &eksbootstrapv1.NTP{ + Enabled: ptr.To(true), + Servers: []string{"time.google.com"}, + }, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + boundary := "//" // default boundary + fmt.Println(output) + return strings.Contains(output, fmt.Sprintf(`boundary="%s"`, boundary)) && + strings.Contains(output, "Content-Type: application/node.eks.aws") && + strings.Contains(output, "Content-Type: text/x-shellscript") && + strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Count(output, fmt.Sprintf("--%s", boundary)) >= 4 // 2 parts * 2 boundaries each + }, + }, + { + name: "node-labels without capacityType - should add ON_DEMAND", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + AMIImageID: "ami-123456", + KubeletFlags: []string{ + "--node-labels=app=my-app,environment=production", + }, + CapacityType: nil, // Should default to ON_DEMAND + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "app=my-app") && + strings.Contains(output, "environment=production") && + strings.Contains(output, "eks.amazonaws.com/capacityType=ON_DEMAND") && + strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-123456") && + strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "node-labels with capacityType set to SPOT", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + 
NodeGroupName: "test-nodegroup", + AMIImageID: "ami-123456", + KubeletFlags: []string{ + "--node-labels=workload=batch", + }, + CapacityType: &spotCapacity, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "workload=batch") && + strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-123456") && + strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && + strings.Contains(output, "eks.amazonaws.com/capacityType=SPOT") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "no existing node-labels - should only add generated labels", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + AMIImageID: "ami-789012", + KubeletFlags: []string{ + "--max-pods=100", + }, + CapacityType: &spotCapacity, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "--node-labels") && + strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-789012") && + strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && + strings.Contains(output, "eks.amazonaws.com/capacityType=SPOT") && + strings.Contains(output, `"--max-pods=100"`) && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "verify other kubelet flags are preserved with node-labels", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + KubeletFlags: []string{ + "--node-labels=tier=workers", + "--register-with-taints=dedicated=gpu:NoSchedule", + "--max-pods=58", + }, + CapacityType: &onDemandCapacity, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "--node-labels") && + strings.Contains(output, 
"tier=workers") && + strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && + strings.Contains(output, "eks.amazonaws.com/capacityType=ON_DEMAND") && + strings.Contains(output, `"--register-with-taints=dedicated=gpu:NoSchedule"`) && + strings.Contains(output, `"--max-pods=58"`) && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, + { + name: "missing required fields", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + // Missing APIServerEndpoint, CACert, NodeGroupName + }, + }, + expectErr: true, + }, + { + name: "missing API server endpoint", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + // Missing APIServerEndpoint + }, + }, + expectErr: true, + }, + { + name: "missing CA certificate", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + NodeGroupName: "test-nodegroup", + // Missing CACert + }, + }, + expectErr: true, + }, + { + name: "missing node group name", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + // Missing NodeGroupName + }, + }, + expectErr: true, + }, + } + + for _, testcase := range tests { + t.Run(testcase.name, func(t *testing.T) { + bytes, err := NewNodeadmUserdata(testcase.args.input) + if testcase.expectErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).NotTo(HaveOccurred()) + if testcase.verifyOutput != nil { + g.Expect(testcase.verifyOutput(string(bytes))).To(BeTrue(), "Output verification failed for: %s", testcase.name) + } + }) + } +} From 3961c8b9f37ca838238469effd5167a479cb5713 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 12 Sep 2025 15:20:29 -0700 Subject: [PATCH 06/18] fix: api type issues from copy paste mistakes --- bootstrap/eks/api/v1beta2/nodeadmconfig_types.go | 3 --- 
bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go | 2 +- .../bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml | 2 +- config/rbac/role.yaml | 6 ------ 4 files changed, 2 insertions(+), 11 deletions(-) diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go index 29994c3977..001cb36dd3 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -6,9 +6,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // NodeadmConfigSpec defines the desired state of NodeadmConfig. type NodeadmConfigSpec struct { // Kubelet contains options for kubelet. diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go b/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go index da38328ad9..732bd43ea2 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfigtemplate_type.go @@ -31,7 +31,7 @@ type NodeadmConfigTemplateResource struct { } // +kubebuilder:object:root=true -// +kubebuilder:resource:path=nodeadmconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=eksct +// +kubebuilder:resource:path=nodeadmconfigtemplates,scope=Namespaced,categories=cluster-api,shortName=nodeadmct // +kubebuilder:storageversion // NodeadmConfigTemplate is the Amazon EKS Bootstrap Configuration Template API. 
diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml index 30ee622704..45489bfd99 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml @@ -14,7 +14,7 @@ spec: listKind: NodeadmConfigTemplateList plural: nodeadmconfigtemplates shortNames: - - eksct + - nodeadmct singular: nodeadmconfigtemplate scope: Namespaced versions: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c45d7e4029..5418e70fd3 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -79,12 +79,6 @@ rules: - patch - update - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - nodeadmconfigs/finalizers - verbs: - - update - apiGroups: - cluster.x-k8s.io resources: From 955a172c6e63ab443bb702cd2349e42a97206d2e Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 12 Sep 2025 15:27:30 -0700 Subject: [PATCH 07/18] test: adds reconciler tests for nodeadm --- bootstrap/eks/controllers/file_resolver.go | 3 +- .../controllers/nodeadmconfig_controller.go | 166 ++++++++++++-- ...odeadmconfig_controller_reconciler_test.go | 205 ++++++++++++++++++ .../nodeadmconfig_controller_test.go | 135 +++++++----- bootstrap/eks/internal/userdata/nodeadm.go | 11 +- .../eks/internal/userdata/nodeadm_test.go | 2 +- 6 files changed, 441 insertions(+), 81 deletions(-) create mode 100644 bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go diff --git a/bootstrap/eks/controllers/file_resolver.go b/bootstrap/eks/controllers/file_resolver.go index 2235d8fb64..ae59832300 100644 --- a/bootstrap/eks/controllers/file_resolver.go +++ b/bootstrap/eks/controllers/file_resolver.go @@ -5,12 +5,11 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" 
"sigs.k8s.io/controller-runtime/pkg/client" - eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta2" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" ) // FileResolver provides methods to resolve files and their content from secrets. diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index 17e73c3634..aed9e23e05 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -3,6 +3,9 @@ package controllers import ( "bytes" "context" + "encoding/base64" + "fmt" + "os" "time" "github.com/pkg/errors" @@ -10,14 +13,18 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "k8s.io/utils/ptr" + infrav1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + kubeconfigutil "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" @@ -46,6 +53,7 @@ type NodeadmConfigReconciler struct { // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machinepools;clusters,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete; + func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) { log := logger.FromContext(ctx) @@ 
-124,10 +132,10 @@ func (r *NodeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques } }() - return ctrl.Result{}, r.joinWorker(ctx, cluster, config, configOwner) + return r.joinWorker(ctx, cluster, config, configOwner) } -func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.NodeadmConfig, configOwner *bsutil.ConfigOwner) error { +func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1.Cluster, config *eksbootstrapv1.NodeadmConfig, configOwner *bsutil.ConfigOwner) (ctrl.Result, error) { log := logger.FromContext(ctx) // only need to reconcile the secret for Machine kinds once, but MachinePools need updates for new launch templates @@ -141,15 +149,15 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust err := r.Client.Get(ctx, secretKey, existingSecret) switch { case err == nil: - return nil + return ctrl.Result{}, nil case !apierrors.IsNotFound(err): log.Error(err, "unable to check for existing bootstrap secret") - return err + return ctrl.Result{}, err } } if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { - return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider") + return ctrl.Result{}, errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider") } if !cluster.Status.InfrastructureReady { @@ -158,19 +166,32 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") - return nil + return ctrl.Result{}, nil } if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.Info("Control Plane has not yet been initialized") conditions.MarkFalse(config, 
eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "") - return nil + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil { - return err + return ctrl.Result{}, errors.Wrap(err, "failed to get control plane") + } + // Check if control plane is ready (skip in test environments) + if !conditions.IsTrue(controlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) { + // Skip control plane readiness check in test environment + if os.Getenv("TEST_ENV") != "true" { + log.Info("Waiting for control plane to be ready") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, + eksbootstrapv1.DataSecretGenerationFailedReason, + clusterv1.ConditionSeverityInfo, "Control plane is not initialized yet") + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + log.Info("Skipping control plane readiness check in test environment") } + log.Info("Control plane is ready, proceeding with userdata generation") log.Info("Generating userdata") fileResolver := FileResolver{Client: r.Client} @@ -178,41 +199,118 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust if err != nil { log.Info("Failed to resolve files for user data") conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) - return err + return ctrl.Result{}, err } + serviceCIDR := "" + if cluster.Spec.ClusterNetwork != nil && cluster.Spec.ClusterNetwork.Services != nil && len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { + serviceCIDR = cluster.Spec.ClusterNetwork.Services.CIDRBlocks[0] + } nodeInput := &userdata.NodeadmInput{ // 
AWSManagedControlPlane webhooks default and validate EKSClusterName - ClusterName: controlPlane.Spec.EKSClusterName, - KubeletFlags: config.Spec.Kubelet.Flags, - FeatureGates: config.Spec.FeatureGates, - Instance: config.Spec.Instance, - ContainerdConfig: config.Spec.Containerd.Config, - ContainerdBaseRuntimeSpec: config.Spec.Containerd.BaseRuntimeSpec, - PreBootstrapCommands: config.Spec.PreBootstrapCommands, - Users: config.Spec.Users, - NTP: config.Spec.NTP, - DiskSetup: config.Spec.DiskSetup, - Mounts: config.Spec.Mounts, - Files: files, + ClusterName: controlPlane.Spec.EKSClusterName, + Instance: config.Spec.Instance, + PreBootstrapCommands: config.Spec.PreBootstrapCommands, + Users: config.Spec.Users, + NTP: config.Spec.NTP, + DiskSetup: config.Spec.DiskSetup, + Mounts: config.Spec.Mounts, + Files: files, + ServiceCIDR: serviceCIDR, + APIServerEndpoint: controlPlane.Spec.ControlPlaneEndpoint.Host, + NodeGroupName: config.Name, + } + if config.Spec.Kubelet != nil { + nodeInput.KubeletFlags = config.Spec.Kubelet.Flags + } + if config.Spec.Containerd != nil { + nodeInput.ContainerdConfig = config.Spec.Containerd.Config + nodeInput.ContainerdBaseRuntimeSpec = config.Spec.Containerd.BaseRuntimeSpec + } + if config.Spec.FeatureGates != nil { + nodeInput.FeatureGates = config.Spec.FeatureGates + } + + // In test environments, provide a mock CA certificate + if os.Getenv("TEST_ENV") == "true" { + log.Info("Using mock CA certificate for test environment") + nodeInput.CACert = "mock-ca-certificate-for-testing" + } else { + // Fetch CA cert from KubeConfig secret + obj := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + } + ca, err := extractCAFromSecret(ctx, r.Client, obj) + if err != nil { + log.Error(err, "Failed to extract CA from kubeconfig secret") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, + eksbootstrapv1.DataSecretGenerationFailedReason, + clusterv1.ConditionSeverityWarning, + "Failed to extract CA from 
kubeconfig secret: %v", err) + return ctrl.Result{}, err + } + nodeInput.CACert = ca } + // Get AMI ID and capacity type from owner resource + switch configOwner.GetKind() { + case "AWSManagedMachinePool": + amp := &expinfrav1.AWSManagedMachinePool{} + if err := r.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: configOwner.GetName()}, amp); err == nil { + log.Info("Found AWSManagedMachinePool", "name", amp.Name, "launchTemplate", amp.Spec.AWSLaunchTemplate != nil) + if amp.Spec.AWSLaunchTemplate != nil && amp.Spec.AWSLaunchTemplate.AMI.ID != nil { + nodeInput.AMIImageID = *amp.Spec.AWSLaunchTemplate.AMI.ID + log.Info("Set AMI ID from AWSManagedMachinePool launch template", "amiID", nodeInput.AMIImageID) + } else { + log.Info("No AMI ID found in AWSManagedMachinePool launch template") + } + if amp.Spec.CapacityType != nil { + nodeInput.CapacityType = amp.Spec.CapacityType + log.Info("Set capacity type from AWSManagedMachinePool", "capacityType", *amp.Spec.CapacityType) + } else { + log.Info("No capacity type found in AWSManagedMachinePool") + } + } else { + log.Info("Failed to get AWSManagedMachinePool", "error", err) + } + case "AWSMachineTemplate": + awsmt := &infrav1beta2.AWSMachineTemplate{} + var awsMTGetErr error + if awsMTGetErr = r.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: configOwner.GetName()}, awsmt); awsMTGetErr == nil { + log.Info("Found AWSMachineTemplate", "name", awsmt.Name) + if awsmt.Spec.Template.Spec.AMI.ID != nil { + nodeInput.AMIImageID = *awsmt.Spec.Template.Spec.AMI.ID + log.Info("Set AMI ID from AWSMachineTemplate", "amiID", nodeInput.AMIImageID) + } else { + log.Info("No AMI ID found in AWSMachineTemplate") + } + } + log.Info("Failed to get AWSMachineTemplate", "error", awsMTGetErr) + default: + log.Info("Config owner kind not recognized for AMI extraction", "kind", configOwner.GetKind()) + } + + log.Info("Generating nodeadm userdata", + "cluster", controlPlane.Spec.EKSClusterName, + "endpoint", 
nodeInput.APIServerEndpoint) // generate userdata userDataScript, err := userdata.NewNodeadmUserdata(nodeInput) if err != nil { log.Error(err, "Failed to create a worker join configuration") conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") - return err + return ctrl.Result{}, err } // store userdata as secret if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil { log.Error(err, "Failed to store bootstrap data") conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") - return err + return ctrl.Result{}, err } - return nil + conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition) + return ctrl.Result{}, nil } // storeBootstrapData creates a new secret with the data passed in as input, @@ -389,3 +487,23 @@ func (r *NodeadmConfigReconciler) ClusterToNodeadmConfigs(_ context.Context, o c return result } + +func extractCAFromSecret(ctx context.Context, c client.Client, obj client.ObjectKey) (string, error) { + data, err := kubeconfigutil.FromSecret(ctx, c, obj) + if err != nil { + return "", errors.Wrapf(err, "failed to get kubeconfig secret %s", obj.Name) + } + config, err := clientcmd.Load(data) + if err != nil { + return "", errors.Wrapf(err, "failed to parse kubeconfig data from secret %s", obj.Name) + } + + // Iterate through all clusters in the kubeconfig and use the first one with CA data + for _, cluster := range config.Clusters { + if len(cluster.CertificateAuthorityData) > 0 { + return base64.StdEncoding.EncodeToString(cluster.CertificateAuthorityData), nil + } + } + + return "", fmt.Errorf("no cluster with CA data found in kubeconfig") +} diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go new file 
mode 100644 index 0000000000..41dd8d938a --- /dev/null +++ b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go @@ -0,0 +1,205 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" + + // ekscontrolplanev1 is registered in suite_test; we don't reference it directly here. 
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +func TestNodeadmConfigReconciler_CreateSecret(t *testing.T) { + t.Setenv("TEST_ENV", "true") + g := NewWithT(t) + + amcp := newAMCP("test-cluster") + // ensure APIServerEndpoint is set for nodeadm input validation + amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://1.2.3.4"} + cluster := newCluster(amcp.Name) + machine := newMachine(cluster, "test-machine") + cfg := newNodeadmConfig(machine) + + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + + reconciler := NodeadmConfigReconciler{Client: testEnv.Client} + + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) + + secret := &corev1.Secret{} + g.Eventually(func(gomega Gomega) { + gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) + }).Should(Succeed()) + + g.Expect(string(secret.Data["value"])).To(ContainSubstring("apiVersion: node.eks.aws/v1alpha1")) + g.Expect(string(secret.Data["value"])).To(ContainSubstring("apiServerEndpoint: https://1.2.3.4")) +} + +func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) { + t.Setenv("TEST_ENV", "true") + g := NewWithT(t) + + amcp := newAMCP("test-cluster") + amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://5.6.7.8"} + cluster := newCluster(amcp.Name) + mp := newMachinePool(cluster, "test-mp") + cfg := newNodeadmConfig(nil) + cfg.ObjectMeta.Name = mp.Name + cfg.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", mp.Name)) + cfg.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ + Kind: "MachinePool", + APIVersion: v1beta1.GroupVersion.String(), + Name: mp.Name, + UID: types.UID(fmt.Sprintf("%s uid", mp.Name)), + }} + cfg.Status.DataSecretName = &mp.Name + + // initial kubelet flags + cfg.Spec.Kubelet = 
&eksbootstrapv1.KubeletOptions{Flags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}} + + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + + reconciler := NodeadmConfigReconciler{Client: testEnv.Client} + + // first reconcile creates secret + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("MachinePool")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) + + secret := &corev1.Secret{} + g.Eventually(func(gomega Gomega) { + gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) + }).Should(Succeed()) + oldData := append([]byte(nil), secret.Data["value"]...) + + // change flags to force different userdata + cfg.Spec.Kubelet.Flags = []string{"--register-with-taints=dedicated=db:NoSchedule"} + + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("MachinePool")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) + + g.Eventually(func(gomega Gomega) { + gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) + gomega.Expect(secret.Data["value"]).NotTo(Equal(oldData)) + }).Should(Succeed()) +} + +func TestNodeadmConfigReconciler_DoesNotUpdate_ForMachineOwner(t *testing.T) { + t.Setenv("TEST_ENV", "true") + g := NewWithT(t) + + amcp := newAMCP("test-cluster") + amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://9.9.9.9"} + cluster := newCluster(amcp.Name) + machine := newMachine(cluster, "test-machine") + cfg := newNodeadmConfig(machine) + + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + + // pre-create secret with placeholder data + pre := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: machine.Name}} + g.Expect(testEnv.Client.Create(ctx, pre)).To(Succeed()) + + reconciler := NodeadmConfigReconciler{Client: testEnv.Client} + 
g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) + + // secret should exist but not be updated from placeholder + secret := &corev1.Secret{} + g.Eventually(func(gomega Gomega) { + gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) + gomega.Expect(secret.Data["value"]).To(BeNil()) + }).Should(Succeed()) +} + +func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { + t.Setenv("TEST_ENV", "true") + g := NewWithT(t) + + amcp := newAMCP("test-cluster") + amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://3.3.3.3"} + // nolint:gosec // test constant + secretPath := "/etc/secret.txt" + secretContent := "secretValue" + cluster := newCluster(amcp.Name) + machine := newMachine(cluster, "test-machine") + cfg := newNodeadmConfig(machine) + cfg.Spec.Files = append(cfg.Spec.Files, eksbootstrapv1.File{ + ContentFrom: &eksbootstrapv1.FileSource{Secret: eksbootstrapv1.SecretFileSource{Name: "my-secret2", Key: "secretKey"}}, + Path: secretPath, + }) + // ensure cloud-config part is rendered + cfg.Spec.NTP = &eksbootstrapv1.NTP{Enabled: func() *bool { b := true; return &b }()} + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "my-secret2"}, + Data: map[string][]byte{"secretKey": []byte(secretContent)}, + } + + g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed()) + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + + // expected minimal presence check + expectedContains := []string{ + "#cloud-config", + secretContent, + } + + reconciler := NodeadmConfigReconciler{Client: testEnv.Client} + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) + + got := &corev1.Secret{} + 
g.Eventually(func(gomega Gomega) { + gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, got)).To(Succeed()) + }).Should(Succeed()) + + for _, s := range expectedContains { + g.Expect(string(got.Data["value"])).To(ContainSubstring(s), "userdata should contain %q", s) + } +} + +// helper to build minimal expected userdata if needed +func newNodeadmUserData(clusterName, apiEndpoint string, flags []string) ([]byte, error) { + return userdata.NewNodeadmUserdata(&userdata.NodeadmInput{ + ClusterName: clusterName, + APIServerEndpoint: apiEndpoint, + CACert: "mock-ca-certificate-for-testing", + KubeletFlags: flags, + }) +} diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller_test.go b/bootstrap/eks/controllers/nodeadmconfig_controller_test.go index dae4dfdaa3..0068866703 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller_test.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller_test.go @@ -2,67 +2,98 @@ package controllers import ( "context" + "fmt" + "testing" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" - bootstrapv1beta2 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1beta2" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -var _ = Describe("NodeadmConfig Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +func TestNodeadmConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) { + g := NewWithT(t) - ctx := context.Background() + cluster := newCluster("cluster") + machine := newMachine(cluster, "machine") + config := newNodeadmConfig(machine) - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - nodeadmconfig := &bootstrapv1beta2.NodeadmConfig{} + cluster.Status = clusterv1.ClusterStatus{ + InfrastructureReady: false, + } + + reconciler := NodeadmConfigReconciler{ + Client: testEnv.Client, + } - BeforeEach(func() { - By("creating the custom resource for the Kind NodeadmConfig") - err := k8sClient.Get(ctx, typeNamespacedName, nodeadmconfig) - if err != nil && errors.IsNotFound(err) { - resource := &bootstrapv1beta2.NodeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } - }) + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(context.Background(), cluster, config, configOwner("Machine")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) +} - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &bootstrapv1beta2.NodeadmConfig{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) +func TestNodeadmConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t *testing.T) { + g := NewWithT(t) - By("Cleanup the specific resource instance NodeadmConfig") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &NodeadmConfigReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } + cluster := newCluster("cluster") + machine := newMachine(cluster, "machine") + config := newNodeadmConfig(machine) - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
- }) - }) -}) + cluster.Status = clusterv1.ClusterStatus{ + InfrastructureReady: true, + } + + reconciler := NodeadmConfigReconciler{ + Client: testEnv.Client, + } + + g.Eventually(func(gomega Gomega) { + _, err := reconciler.joinWorker(context.Background(), cluster, config, configOwner("Machine")) + gomega.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) +} + +func newNodeadmConfig(machine *clusterv1.Machine) *eksbootstrapv1.NodeadmConfig { + config := &eksbootstrapv1.NodeadmConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "NodeadmConfig", + APIVersion: eksbootstrapv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + } + if machine != nil { + config.ObjectMeta.Name = machine.Name + config.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", machine.Name)) + config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: machine.Name, + UID: types.UID(fmt.Sprintf("%s uid", machine.Name)), + }, + } + config.Status.DataSecretName = &machine.Name + machine.Spec.Bootstrap.ConfigRef.Name = config.Name + machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace + } + if machine != nil { + config.ObjectMeta.Name = machine.Name + config.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", machine.Name)) + config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: machine.Name, + UID: types.UID(fmt.Sprintf("%s uid", machine.Name)), + }, + } + config.Status.DataSecretName = &machine.Name + machine.Spec.Bootstrap.ConfigRef.Name = config.Name + machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace + } + return config +} diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index 197faac6c8..ddddf84ef4 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -159,7 +159,7 @@ type 
NodeadmInput struct { NodeLabels string // Not exposed in CRD, computed from user input } -func (input *NodeadmInput) setKubeletFlags() { +func (input *NodeadmInput) setKubeletFlags() error { var nodeLabels string newFlags := []string{} for _, flag := range input.KubeletFlags { @@ -174,6 +174,9 @@ func (input *NodeadmInput) setKubeletFlags() { labels := strings.Split(nodeLabels, ",") for _, label := range labels { labelSplit := strings.Split(label, "=") + if len(labelSplit) != 2 { + return fmt.Errorf("invalid label: %s", label) + } labelKey := labelSplit[0] labelValue := labelSplit[1] labelsMap[labelKey] = labelValue @@ -195,6 +198,7 @@ func (input *NodeadmInput) setKubeletFlags() { newLabels := stringBuilder.String()[:len(stringBuilder.String())-1] // remove the last comma newFlags = append(newFlags, fmt.Sprintf("--node-labels=%s", newLabels)) input.KubeletFlags = newFlags + return nil } // getCapacityTypeString returns the string representation of the capacity type. @@ -252,7 +256,10 @@ func validateNodeadmInput(input *NodeadmInput) error { if input.Boundary == "" { input.Boundary = boundary } - input.setKubeletFlags() + err := input.setKubeletFlags() + if err != nil { + return err + } klog.V(2).Infof("Nodeadm Userdata Generation - node-labels: %s", input.NodeLabels) diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index b63919766b..701b72eda0 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) -func TestAddDefaultNodeLabelsToKubeletFlags(t *testing.T) { +func TestSetKubeletFlags(t *testing.T) { g := NewWithT(t) tests := []struct { From c51c989ced4ca3a2223899e25be41d760970c930 Mon Sep 17 00:00:00 2001 From: faiq Date: Tue, 16 Sep 2025 11:19:02 -0700 Subject: [PATCH 08/18] fix: runs golangci-lint --- .../eks/api/v1beta2/nodeadmconfig_types.go | 18 
++++++++---- .../controllers/nodeadmconfig_controller.go | 28 ++++++++++--------- ...odeadmconfig_controller_reconciler_test.go | 18 ++---------- bootstrap/eks/internal/userdata/nodeadm.go | 14 +++++----- .../eks/internal/userdata/nodeadm_test.go | 17 +++++------ 5 files changed, 47 insertions(+), 48 deletions(-) diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go index 001cb36dd3..5d7a3884a2 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -3,6 +3,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -101,8 +102,10 @@ type LocalStorageOptions struct { type Feature string const ( - FeatureInstanceIdNodeName Feature = "InstanceIdNodeName" - FeatureFastImagePull Feature = "FastImagePull" + // FeatureInstanceIDNodeName will use EC2 instance ID as node name. + FeatureInstanceIDNodeName Feature = "InstanceIdNodeName" + // FeatureFastImagePull enables a parallel image pull for container images. + FeatureFastImagePull Feature = "FastImagePull" ) // LocalStorageStrategy specifies how to handle an instance's local storage devices. @@ -110,9 +113,12 @@ const ( type LocalStorageStrategy string const ( - RAID0Strategy LocalStorageStrategy = "RAID0" + // RAID0Strategy is a local storage strategy for EKS nodes + RAID0Strategy LocalStorageStrategy = "RAID0" + // RAID10Strategy is a local storage strategy for EKS nodes RAID10Strategy LocalStorageStrategy = "RAID10" - MountStrategy LocalStorageStrategy = "Mount" + // MountStrategy is a local storage strategy for EKS nodes + MountStrategy LocalStorageStrategy = "Mount" ) // DisabledMount specifies a directory that should not be mounted onto local storage. 
@@ -120,8 +126,10 @@ const ( type DisabledMount string const ( + // DisabledMountContainerd refers to /var/lib/containerd DisabledMountContainerd DisabledMount = "Containerd" - DisabledMountPodLogs DisabledMount = "PodLogs" + // DisabledMountPodLogs refers to /var/log/pods + DisabledMountPodLogs DisabledMount = "PodLogs" ) // GetConditions returns the observations of the operational state of the NodeadmConfig resource. diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index aed9e23e05..f02e328044 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -16,31 +16,31 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "k8s.io/utils/ptr" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bsutil "sigs.k8s.io/cluster-api/bootstrap/util" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - kubeconfigutil "sigs.k8s.io/cluster-api/util/kubeconfig" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bsutil "sigs.k8s.io/cluster-api/bootstrap/util" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" + kubeconfigutil "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" ) -// NodeadmConfigReconciler reconciles a NodeadmConfig object +// NodeadmConfigReconciler reconciles a NodeadmConfig object. type NodeadmConfigReconciler struct { client.Client Scheme *runtime.Scheme @@ -275,7 +275,7 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust log.Info("Failed to get AWSManagedMachinePool", "error", err) } case "AWSMachineTemplate": - awsmt := &infrav1beta2.AWSMachineTemplate{} + awsmt := &infrav1.AWSMachineTemplate{} var awsMTGetErr error if awsMTGetErr = r.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: configOwner.GetName()}, awsmt); awsMTGetErr == nil { log.Info("Found AWSMachineTemplate", "name", awsmt.Name) @@ -423,6 +423,8 @@ func (r *NodeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl return nil } +// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enque requests for +// NodeadmConfig reconciliation. 
func (r *NodeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { result := []ctrl.Request{} diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go index 41dd8d938a..16a6b19f9e 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go @@ -27,11 +27,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" - // ekscontrolplanev1 is registered in suite_test; we don't reference it directly here. clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - v1beta1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) func TestNodeadmConfigReconciler_CreateSecret(t *testing.T) { @@ -76,7 +74,7 @@ func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) { cfg.ObjectMeta.UID = types.UID(fmt.Sprintf("%s uid", mp.Name)) cfg.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ Kind: "MachinePool", - APIVersion: v1beta1.GroupVersion.String(), + APIVersion: expclusterv1.GroupVersion.String(), Name: mp.Name, UID: types.UID(fmt.Sprintf("%s uid", mp.Name)), }} @@ -151,7 +149,7 @@ func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { amcp := newAMCP("test-cluster") amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://3.3.3.3"} - // nolint:gosec // test constant + //nolint:gosec // test constant secretPath := "/etc/secret.txt" secretContent := "secretValue" cluster := newCluster(amcp.Name) @@ -193,13 +191,3 @@ func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { g.Expect(string(got.Data["value"])).To(ContainSubstring(s), "userdata should contain %q", s) } } - -// 
helper to build minimal expected userdata if needed -func newNodeadmUserData(clusterName, apiEndpoint string, flags []string) ([]byte, error) { - return userdata.NewNodeadmUserdata(&userdata.NodeadmInput{ - ClusterName: clusterName, - APIServerEndpoint: apiEndpoint, - CACert: "mock-ca-certificate-for-testing", - KubeletFlags: flags, - }) -} diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index ddddf84ef4..49cd194435 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) @@ -202,17 +203,17 @@ func (input *NodeadmInput) setKubeletFlags() error { } // getCapacityTypeString returns the string representation of the capacity type. -func (ni *NodeadmInput) getCapacityTypeString() string { - if ni.CapacityType == nil { +func (input *NodeadmInput) getCapacityTypeString() string { + if input.CapacityType == nil { return "ON_DEMAND" } - switch *ni.CapacityType { + switch *input.CapacityType { case v1beta2.ManagedMachinePoolCapacityTypeSpot: return "SPOT" case v1beta2.ManagedMachinePoolCapacityTypeOnDemand: return "ON_DEMAND" default: - return strings.ToUpper(string(*ni.CapacityType)) + return strings.ToUpper(string(*input.CapacityType)) } } @@ -266,7 +267,7 @@ func validateNodeadmInput(input *NodeadmInput) error { return nil } -// NewNode returns the user data string to be used on a node instance. +// NewNodeadmUserdata returns the user data string to be used on a node instance. 
func NewNodeadmUserdata(input *NodeadmInput) ([]byte, error) { if err := validateNodeadmInput(input); err != nil { return nil, err @@ -336,8 +337,7 @@ func NewNodeadmUserdata(input *NodeadmInput) ([]byte, error) { return nil, fmt.Errorf("failed to execute node user data template: %w", err) } } - // write the final boundry closing, all of the ones in the script use intermediate boundries + // write the final boundary closing, all of the ones in the script use intermediate boundries buf.Write([]byte("--")) return buf.Bytes(), nil - } diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index 701b72eda0..8aa94c5ec1 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/gomega" "github.com/onsi/gomega/format" "k8s.io/utils/ptr" + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) @@ -283,8 +284,8 @@ func TestNodeadmUserdata(t *testing.T) { CACert: "test-ca-cert", NodeGroupName: "test-nodegroup", Mounts: []eksbootstrapv1.MountPoints{ - eksbootstrapv1.MountPoints{"/dev/disk/azure/scsi1/lun0"}, - eksbootstrapv1.MountPoints{"/mnt/etcd"}, + {"/dev/disk/scsi1/lun0"}, + {"/mnt/etcd"}, }, }, }, @@ -292,7 +293,7 @@ func TestNodeadmUserdata(t *testing.T) { verifyOutput: func(output string) bool { return strings.Contains(output, "Content-Type: text/cloud-config") && strings.Contains(output, "#cloud-config") && - strings.Contains(output, "/dev/disk/azure/scsi1/lun0") && + strings.Contains(output, "/dev/disk/scsi1/lun0") && strings.Contains(output, "/mnt/etcd") && strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") }, @@ -316,7 +317,7 @@ func TestNodeadmUserdata(t *testing.T) { expectErr: false, verifyOutput: func(output string) bool { boundary := "CUSTOMBOUNDARY123" - return strings.Contains(output, 
fmt.Sprintf(`boundary="%s"`, boundary)) && + return strings.Contains(output, fmt.Sprintf(`boundary=%q`, boundary)) && strings.Contains(output, fmt.Sprintf("--%s", boundary)) && strings.Contains(output, fmt.Sprintf("--%s--", boundary)) && strings.Contains(output, "Content-Type: application/node.eks.aws") && @@ -338,7 +339,7 @@ func TestNodeadmUserdata(t *testing.T) { expectErr: false, verifyOutput: func(output string) bool { boundary := "//" // default boundary - return strings.Contains(output, fmt.Sprintf(`boundary="%s"`, boundary)) && + return strings.Contains(output, fmt.Sprintf(`boundary=%q`, boundary)) && strings.Contains(output, fmt.Sprintf("--%s", boundary)) && strings.Contains(output, fmt.Sprintf("--%s--", boundary)) && strings.Contains(output, "Content-Type: application/node.eks.aws") && @@ -347,7 +348,7 @@ func TestNodeadmUserdata(t *testing.T) { }, }, { - name: "boundary verification - shell script and node config parts only", + name: "boundary verification - all 3 parts", args: args{ input: &NodeadmInput{ ClusterName: "test-cluster", @@ -365,11 +366,11 @@ func TestNodeadmUserdata(t *testing.T) { verifyOutput: func(output string) bool { boundary := "//" // default boundary fmt.Println(output) - return strings.Contains(output, fmt.Sprintf(`boundary="%s"`, boundary)) && + return strings.Contains(output, fmt.Sprintf(`boundary=%q`, boundary)) && strings.Contains(output, "Content-Type: application/node.eks.aws") && strings.Contains(output, "Content-Type: text/x-shellscript") && strings.Contains(output, "Content-Type: text/cloud-config") && - strings.Count(output, fmt.Sprintf("--%s", boundary)) >= 4 // 2 parts * 2 boundaries each + strings.Count(output, fmt.Sprintf("--%s", boundary)) == 6 // 3 parts * 2 boundaries each }, }, { From 2f2e34d29e62478f787ac625b3fd8e20890af361 Mon Sep 17 00:00:00 2001 From: faiq Date: Tue, 16 Sep 2025 14:47:52 -0700 Subject: [PATCH 09/18] fix: boundries in userdata --- bootstrap/eks/internal/userdata/nodeadm.go | 17 
++++++++++++----- bootstrap/eks/internal/userdata/nodeadm_test.go | 4 ++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index 49cd194435..e8ac94e63d 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -32,8 +32,8 @@ import ( const ( boundary = "//" + // this does not start with a boundary because it is the last item that is processed. cloudInitUserData = ` ---{{.Boundary}} Content-Type: text/cloud-config MIME-Version: 1.0 Content-Transfer-Encoding: 7bit @@ -43,12 +43,19 @@ Content-Disposition: attachment; filename="cloud-config.yaml" {{- if .Files }} {{template "files" .Files}} {{- end }} -runcmd: +{{- if .NTP }} {{- template "ntp" .NTP }} +{{- end }} +{{- if .Users }} {{- template "users" .Users }} -{{- template "disk_setup" .DiskSetup}} -{{- template "fs_setup" .DiskSetup}} -{{- template "mounts" .Mounts}} +{{- end }} +{{- if .DiskSetup }} +{{- template "disk_setup" .DiskSetup }} +{{- template "fs_setup" .DiskSetup }} +{{- end }} +{{- if .Mounts }} +{{- template "mounts" .Mounts }} +{{- end }} --{{.Boundary}}` // Shell script part template for nodeadm. 
diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index 8aa94c5ec1..8fe8448e76 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -323,7 +323,7 @@ func TestNodeadmUserdata(t *testing.T) { strings.Contains(output, "Content-Type: application/node.eks.aws") && strings.Contains(output, "Content-Type: text/x-shellscript") && strings.Contains(output, "Content-Type: text/cloud-config") && - strings.Count(output, fmt.Sprintf("--%s", boundary)) == 6 // 3 parts * 2 boundaries each + strings.Count(output, fmt.Sprintf("--%s", boundary)) == 5 // 3 parts * 2 boundaries each except cloud-config }, }, { @@ -370,7 +370,7 @@ func TestNodeadmUserdata(t *testing.T) { strings.Contains(output, "Content-Type: application/node.eks.aws") && strings.Contains(output, "Content-Type: text/x-shellscript") && strings.Contains(output, "Content-Type: text/cloud-config") && - strings.Count(output, fmt.Sprintf("--%s", boundary)) == 6 // 3 parts * 2 boundaries each + strings.Count(output, fmt.Sprintf("--%s", boundary)) == 5 // 3 parts * 2 boundaries each except cloud-config }, }, { From 66a5d18032aede17d45e69ee6a87d12bf5d3e3bf Mon Sep 17 00:00:00 2001 From: faiq Date: Tue, 16 Sep 2025 15:13:03 -0700 Subject: [PATCH 10/18] fix: kubelet logic --- bootstrap/eks/controllers/nodeadmconfig_controller.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index f02e328044..f5dfc9eff1 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -222,6 +222,7 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust } if config.Spec.Kubelet != nil { nodeInput.KubeletFlags = config.Spec.Kubelet.Flags + nodeInput.KubeletConfig = config.Spec.Kubelet.Config } if config.Spec.Containerd != 
nil { nodeInput.ContainerdConfig = config.Spec.Containerd.Config From 59fbdbcd8797e9f5151cb0793d83b24b97c8f3d6 Mon Sep 17 00:00:00 2001 From: faiq Date: Tue, 16 Sep 2025 15:13:45 -0700 Subject: [PATCH 11/18] fix: adds nodeadmconfigtemplate crd --- config/crd/kustomization.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b0bf490a5d..ddfabd4e3b 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -22,8 +22,9 @@ resources: - bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml - bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml - bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml -- bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml - bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml +- bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml +- bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml - bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml - bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml - bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml From 4e1fa66074c6afc0b84edf8f28d8f8bbf84071cd Mon Sep 17 00:00:00 2001 From: faiq Date: Wed, 17 Sep 2025 13:00:02 -0700 Subject: [PATCH 12/18] fix: nodeadm kubelet config --- .../controllers/nodeadmconfig_controller.go | 20 ++++++++------ bootstrap/eks/internal/userdata/nodeadm.go | 26 ++----------------- .../eks/internal/userdata/nodeadm_test.go | 23 +++++++++++++++- 3 files changed, 36 insertions(+), 33 deletions(-) diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index f5dfc9eff1..47d66ef797 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -222,11 +222,15 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust } if config.Spec.Kubelet != nil { 
nodeInput.KubeletFlags = config.Spec.Kubelet.Flags - nodeInput.KubeletConfig = config.Spec.Kubelet.Config + if config.Spec.Kubelet.Config != nil { + nodeInput.KubeletConfig = config.Spec.Kubelet.Config + } } if config.Spec.Containerd != nil { nodeInput.ContainerdConfig = config.Spec.Containerd.Config - nodeInput.ContainerdBaseRuntimeSpec = config.Spec.Containerd.BaseRuntimeSpec + if config.Spec.Containerd.BaseRuntimeSpec != nil { + nodeInput.ContainerdBaseRuntimeSpec = config.Spec.Containerd.BaseRuntimeSpec + } } if config.Spec.FeatureGates != nil { nodeInput.FeatureGates = config.Spec.FeatureGates @@ -328,21 +332,21 @@ func (r *NodeadmConfigReconciler) storeBootstrapData(ctx context.Context, cluste }, secret); err != nil { if apierrors.IsNotFound(err) { if secret, err = r.createBootstrapSecret(ctx, cluster, config, data); err != nil { - return errors.Wrap(err, "failed to create bootstrap data secret for EKSConfig") + return errors.Wrap(err, "failed to create bootstrap data secret for NodeadmConfig") } - log.Info("created bootstrap data secret for EKSConfig", "secret", klog.KObj(secret)) + log.Info("created bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret)) } else { - return errors.Wrap(err, "failed to get data secret for EKSConfig") + return errors.Wrap(err, "failed to get data secret for NodeadmConfig") } } else { updated, err := r.updateBootstrapSecret(ctx, secret, data) if err != nil { - return errors.Wrap(err, "failed to update data secret for EKSConfig") + return errors.Wrap(err, "failed to update data secret for NodeadmConfig") } if updated { - log.Info("updated bootstrap data secret for EKSConfig", "secret", klog.KObj(secret)) + log.Info("updated bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret)) } else { - log.Trace("no change in bootstrap data secret for EKSConfig", "secret", klog.KObj(secret)) + log.Trace("no change in bootstrap data secret for NodeadmConfig", "secret", klog.KObj(secret)) } } diff --git 
a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index e8ac94e63d..f0d92d2992 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -98,7 +98,7 @@ spec: kubelet: {{- if .KubeletConfig }} config: | -{{ Indent 6 .KubeletConfigRaw }} +{{ Indent 6 .KubeletConfig }} {{- end }} flags: {{- range $flag := .KubeletFlags }} @@ -112,7 +112,7 @@ spec: {{- end }} {{- if .ContainerdBaseRuntimeSpec }} baseRuntimeSpec: | -{{ Indent 6 .ContainerdBaseRuntimeSpecRaw }} +{{ Indent 6 .ContainerdBaseRuntimeSpec}} {{- end }} {{- end }} {{- if .Instance }} @@ -224,28 +224,6 @@ func (input *NodeadmInput) getCapacityTypeString() string { } } -// ContainerdBaseRuntimeSpecRaw returns the raw JSON/YAML for inclusion into templates. -func (input *NodeadmInput) ContainerdBaseRuntimeSpecRaw() string { - if input.ContainerdBaseRuntimeSpec == nil { - return "" - } - if len(input.ContainerdBaseRuntimeSpec.Raw) == 0 { - return "" - } - return string(input.ContainerdBaseRuntimeSpec.Raw) -} - -// KubeletConfigRaw returns the raw JSON/YAML for inclusion into templates. -func (input *NodeadmInput) KubeletConfigRaw() string { - if input.KubeletConfig == nil { - return "" - } - if len(input.KubeletConfig.Raw) == 0 { - return "" - } - return string(input.KubeletConfig.Raw) -} - // validateNodeInput validates the input for nodeadm user data generation. 
func validateNodeadmInput(input *NodeadmInput) error { if input.APIServerEndpoint == "" { diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index 8fe8448e76..91e2c9e231 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -160,6 +160,28 @@ func TestNodeadmUserdata(t *testing.T) { strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") }, }, + { + name: "with kubelet config", + args: args{ + input: &NodeadmInput{ + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + NodeGroupName: "test-nodegroup", + KubeletConfig: ` +evictionHard: + memory.available: "2000Mi" + + `, + }, + }, + expectErr: false, + verifyOutput: func(output string) bool { + return strings.Contains(output, "evictionHard:") && + strings.Contains(output, "memory.available: \"2000Mi\"") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") + }, + }, { name: "with pre bootstrap commands", args: args{ @@ -365,7 +387,6 @@ func TestNodeadmUserdata(t *testing.T) { expectErr: false, verifyOutput: func(output string) bool { boundary := "//" // default boundary - fmt.Println(output) return strings.Contains(output, fmt.Sprintf(`boundary=%q`, boundary)) && strings.Contains(output, "Content-Type: application/node.eks.aws") && strings.Contains(output, "Content-Type: text/x-shellscript") && From a96154852dc7b17509f1cfda4f434fff24a54704 Mon Sep 17 00:00:00 2001 From: faiq Date: Wed, 17 Sep 2025 14:10:24 -0700 Subject: [PATCH 13/18] fix: handle runtime.Rawextension properly --- bootstrap/eks/internal/userdata/nodeadm.go | 11 +++---- .../eks/internal/userdata/nodeadm_test.go | 8 +++-- bootstrap/eks/internal/userdata/utils.go | 31 +++++++++++++++++++ 3 files changed, 41 insertions(+), 9 deletions(-) diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index f0d92d2992..79ace56607 
100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -97,8 +97,8 @@ spec: {{- end }} kubelet: {{- if .KubeletConfig }} - config: | -{{ Indent 6 .KubeletConfig }} + config: +{{ Indent 6 (toYaml .KubeletConfig) }} {{- end }} flags: {{- range $flag := .KubeletFlags }} @@ -107,12 +107,12 @@ spec: {{- if or .ContainerdConfig .ContainerdBaseRuntimeSpec }} containerd: {{- if .ContainerdConfig }} - config: | + config: {{ Indent 6 .ContainerdConfig }} {{- end }} {{- if .ContainerdBaseRuntimeSpec }} - baseRuntimeSpec: | -{{ Indent 6 .ContainerdBaseRuntimeSpec}} + baseRuntimeSpec: +{{ Indent 6 (toYaml .ContainerdBaseRuntimeSpec) }} {{- end }} {{- end }} {{- if .Instance }} @@ -238,7 +238,6 @@ func validateNodeadmInput(input *NodeadmInput) error { if input.NodeGroupName == "" { return fmt.Errorf("node group name is required for nodeadm") } - if input.Boundary == "" { input.Boundary = boundary } diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index 91e2c9e231..c13a09d1c1 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -23,6 +23,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/onsi/gomega/format" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" @@ -168,11 +169,12 @@ func TestNodeadmUserdata(t *testing.T) { APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", NodeGroupName: "test-nodegroup", - KubeletConfig: ` + KubeletConfig: &runtime.RawExtension{ + Raw: []byte(` evictionHard: memory.available: "2000Mi" - - `, + `), + }, }, }, expectErr: false, diff --git a/bootstrap/eks/internal/userdata/utils.go b/bootstrap/eks/internal/userdata/utils.go index aee2a82125..981a6cb6cd 100644 --- a/bootstrap/eks/internal/userdata/utils.go +++ b/bootstrap/eks/internal/userdata/utils.go @@ -17,13 +17,19 @@ limitations under the License. package userdata import ( + "fmt" "strings" "text/template" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" ) var ( defaultTemplateFuncMap = template.FuncMap{ "Indent": templateYAMLIndent, + "toYaml": templateToYAML, } ) @@ -32,3 +38,28 @@ func templateYAMLIndent(i int, input string) string { ident := "\n" + strings.Repeat(" ", i) return strings.Repeat(" ", i) + strings.Join(split, ident) } + +func templateToYAML(r *runtime.RawExtension) (string, error) { + if r == nil { + return "", nil + } + if r.Object != nil { + b, err := yaml.Marshal(r.Object) + if err != nil { + return "", errors.Wrap(err, "failed to convert to yaml") + } + return string(b), nil + } + if len(r.Raw) > 0 { + if yb, err := yaml.JSONToYAML(r.Raw); err == nil { + return string(yb), nil + } + var temp interface{} + err := yaml.Unmarshal(r.Raw, &temp) + if err == nil { + return string(r.Raw), nil + } + return "", fmt.Errorf("runtime object raw is neither json nor yaml %s", string(r.Raw)) + } + return "", nil +} From 3b4537ac269a6374843760ff727770cfbdcbd2cb Mon Sep 17 00:00:00 2001 From: faiq Date: Mon, 22 Sep 2025 12:38:32 -0700 Subject: [PATCH 14/18] e2e: adds test for nodeadm --- 
.../managed/eks_upgrade_to_nodeadm_test.go | 174 ++++++++++++++++++ test/e2e/suites/managed/machine_deployment.go | 56 ++++++ 2 files changed, 230 insertions(+) create mode 100644 test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go diff --git a/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go b/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go new file mode 100644 index 0000000000..973e3f325d --- /dev/null +++ b/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go @@ -0,0 +1,174 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managed + +import ( + "context" + "fmt" + + "github.com/blang/semver" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ref "k8s.io/client-go/tools/reference" + + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +// EKS cluster upgrade tests. 
+var _ = ginkgo.Describe("EKS Cluster upgrade test", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + specName = "eks-upgrade" + clusterName string + initialVersion string + upgradeToVersion string + ) + + shared.ConditionalIt(runUpgradeTests, "[managed] [upgrade] [nodeadm] should create a cluster and upgrade the kubernetes version", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeFromVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeToVersion)) + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + + initialVersion = e2eCtx.E2EConfig.MustGetVariable(shared.EksUpgradeFromVersion) + upgradeToVersion = e2eCtx.E2EConfig.MustGetVariable(shared.EksUpgradeToVersion) + + ginkgo.By("default iam role should exist") + VerifyRoleExistsAndOwned(ctx, ekscontrolplanev1.DefaultEKSControlPlaneRole, clusterName, false, e2eCtx.AWSSession) + + ginkgo.By("should create an EKS control plane") + ManagedClusterSpec(ctx, func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSControlPlaneOnlyFlavor, // TODO (richardcase) - change in the future when upgrades to machinepools work + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error + WorkerMachineCount: 0, + KubernetesVersion: initialVersion, + } + }) + + ginkgo.By(fmt.Sprintf("getting 
cluster with name %s", clusterName)) + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find cluster") + + ginkgo.By("should create a MachineDeployment") + MachineDeploymentSpec(ctx, func() MachineDeploymentSpecInput { + return MachineDeploymentSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Replicas: 1, + Cleanup: false, + } + }) + + ginkgo.By(fmt.Sprintf("should upgrade control plane to version %s", upgradeToVersion)) + UpgradeControlPlaneVersionSpec(ctx, func() UpgradeControlPlaneVersionSpecInput { + return UpgradeControlPlaneVersionSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + AWSSession: e2eCtx.BootstrapUserAWSSession, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + ClusterName: clusterName, + Namespace: namespace, + UpgradeVersion: upgradeToVersion, + } + }) + + ginkgo.By(fmt.Sprintf("should upgrade mahchine deployments to version %s", upgradeToVersion)) + kube133, err := semver.ParseTolerant("1.33.0") + Expect(err).To(BeNil(), "semver should pass") + upgradeToVersionParse, err := semver.ParseTolerant(upgradeToVersion) + Expect(err).To(BeNil(), "semver should pass") + + md := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...) 
+ var nodeadmConfigTemplate *eksbootstrapv1.NodeadmConfigTemplate + if upgradeToVersionParse.GTE(kube133) { + ginkgo.By("creating a nodeadmconfigtemplate object") + nodeadmConfigTemplate = &eksbootstrapv1.NodeadmConfigTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-nodeadm-config", clusterName), + Namespace: namespace.Name, + }, + Spec: eksbootstrapv1.NodeadmConfigTemplateSpec{ + Template: eksbootstrapv1.NodeadmConfigTemplateResource{ + Spec: eksbootstrapv1.NodeadmConfigSpec{ + PreBootstrapCommands: []string{ + "echo \"hello world\"", + }, + }, + }, + }, + } + ginkgo.By("creating the nodeadm config template in the cluster") + Expect(e2eCtx.Environment.BootstrapClusterProxy.GetClient().Create(ctx, nodeadmConfigTemplate)).To(Succeed()) + } + ginkgo.By("upgrading machine deployments") + input := UpgradeMachineDeploymentsAndWaitInput{ + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + UpgradeVersion: upgradeToVersion, + MachineDeployments: md, + WaitForMachinesToBeUpgraded: e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes"), + } + if nodeadmConfigTemplate != nil { + nodeadmRef, err := ref.GetReference(initScheme(), nodeadmConfigTemplate) + Expect(err).To(BeNil(), "object should have ref") + input.UpgradeBootstrapTemplate = nodeadmRef + } + UpgradeMachineDeploymentsAndWait(ctx, input) + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) 
+ }) +}) diff --git a/test/e2e/suites/managed/machine_deployment.go b/test/e2e/suites/managed/machine_deployment.go index 4ef19a0f8d..f7ddeaadee 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -22,17 +22,21 @@ package managed import ( "context" "fmt" + "time" "github.com/aws/aws-sdk-go-v2/aws" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util/patch" ) // MachineDeploymentSpecInput is the input for MachineDeploymentSpec. @@ -112,3 +116,55 @@ func MachineDeploymentSpec(ctx context.Context, inputGetter func() MachineDeploy }, input.E2EConfig.GetIntervals("", "wait-delete-machine")...) } } + +// UpgradeMachineDeploymentsAndWaitInput is the input type for UpgradeMachineDeploymentsAndWait. +// This function is copied from capi-core, but also allows the user to change +// the bootstrap reference as well.. +type UpgradeMachineDeploymentsAndWaitInput struct { + BootstrapClusterProxy framework.ClusterProxy + Cluster *clusterv1.Cluster + UpgradeVersion string + UpgradeMachineTemplate *string + UpgradeBootstrapTemplate *corev1.ObjectReference + MachineDeployments []*clusterv1.MachineDeployment + WaitForMachinesToBeUpgraded []interface{} +} + +// UpgradeMachineDeploymentsAndWait upgrades a machine deployment and waits for its machines to be upgraded. +func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineDeploymentsAndWaitInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentsAndWait") + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. 
input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentsAndWait") + + mgmtClient := input.BootstrapClusterProxy.GetClient() + + for _, deployment := range input.MachineDeployments { + log := logger.FromContext(ctx) + patchHelper, err := patch.NewHelper(deployment, mgmtClient) + Expect(err).ToNot(HaveOccurred()) + + oldVersion := deployment.Spec.Template.Spec.Version + deployment.Spec.Template.Spec.Version = &input.UpgradeVersion + if input.UpgradeMachineTemplate != nil { + deployment.Spec.Template.Spec.InfrastructureRef.Name = *input.UpgradeMachineTemplate + } + if input.UpgradeBootstrapTemplate != nil { + deployment.Spec.Template.Spec.Bootstrap.ConfigRef = input.UpgradeBootstrapTemplate + } + Eventually(func() error { + return patchHelper.Patch(ctx, deployment) + }, time.Minute*3, time.Second*3).Should(Succeed(), "Failed to patch Kubernetes version on MachineDeployment %s", klog.KObj(deployment)) + + log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded from %s to %s", + deployment.Name, *oldVersion, input.UpgradeVersion) + framework.WaitForMachineDeploymentMachinesToBeUpgraded(ctx, framework.WaitForMachineDeploymentMachinesToBeUpgradedInput{ + Lister: mgmtClient, + Cluster: input.Cluster, + MachineCount: int(*deployment.Spec.Replicas), + KubernetesUpgradeVersion: input.UpgradeVersion, + MachineDeployment: *deployment, + }, input.WaitForMachinesToBeUpgraded...) 
+ } +} From 4f4db0cae5001b9f6f36401a0e9b4f051627d2cb Mon Sep 17 00:00:00 2001 From: faiq Date: Thu, 25 Sep 2025 10:02:00 -0700 Subject: [PATCH 15/18] chore: fixes up tests to no longer require TEST_ENV variable --- .../eksconfig_controller_reconciler_test.go | 42 +++++++- .../controllers/nodeadmconfig_controller.go | 53 ++++----- ...odeadmconfig_controller_reconciler_test.go | 102 +++++++++--------- bootstrap/eks/controllers/suite_test.go | 2 + .../eks/internal/userdata/nodeadm_test.go | 6 +- 5 files changed, 116 insertions(+), 89 deletions(-) diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go index 163b94a338..6d68543f81 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + kubeconfigutil "sigs.k8s.io/cluster-api/util/kubeconfig" ) func TestEKSConfigReconciler(t *testing.T) { @@ -292,9 +293,12 @@ func newCluster(name string) *clusterv1.Cluster { }, Status: clusterv1.ClusterStatus{ InfrastructureReady: true, + ControlPlaneReady: true, }, } conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + conditions.MarkTrue(cluster, clusterv1.ControlPlaneReadyCondition) + conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) return cluster } @@ -413,7 +417,7 @@ func newUserData(clusterName string, kubeletExtraArgs map[string]string) ([]byte // newAMCP returns an EKS AWSManagedControlPlane object. 
func newAMCP(name string) *ekscontrolplanev1.AWSManagedControlPlane { generatedName := fmt.Sprintf("%s-%s", name, util.RandomString(5)) - return &ekscontrolplanev1.AWSManagedControlPlane{ + amcp := &ekscontrolplanev1.AWSManagedControlPlane{ TypeMeta: metav1.TypeMeta{ Kind: "AWSManagedControlPlane", APIVersion: ekscontrolplanev1.GroupVersion.String(), @@ -426,4 +430,40 @@ func newAMCP(name string) *ekscontrolplanev1.AWSManagedControlPlane { EKSClusterName: generatedName, }, } + conditions.MarkTrue(amcp, ekscontrolplanev1.EKSControlPlaneReadyCondition) + return amcp +} + +const dummyKubeconfigTemplate = ` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCkV5QXV0aG9yIElzc3VlciBJc3N1ZXI6IGV4YW1wbGUuY29tIC0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: %s + name: %s +contexts: +- context: + cluster: %s + user: my-user + name: my-context +current-context: my-context +kind: Config +users: +- name: my-user + user: + token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c +` + +func newKubeconfigSecret(apiEndpoint string, cluster *clusterv1.Cluster) *corev1.Secret { + data := fmt.Sprintf(dummyKubeconfigTemplate, apiEndpoint, cluster.Name, cluster.Name) + return kubeconfigutil.GenerateSecretWithOwner( + client.ObjectKeyFromObject(cluster), + []byte(data), + metav1.OwnerReference{ + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + Name: cluster.Name, + UID: cluster.UID, + }, + ) } diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index 47d66ef797..f974880728 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -5,7 +5,6 @@ import ( "context" "encoding/base64" "fmt" - "os" "time" "github.com/pkg/errors" @@ -179,17 +178,13 @@ func (r 
*NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to get control plane") } - // Check if control plane is ready (skip in test environments) + // Check if control plane is ready if !conditions.IsTrue(controlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) { - // Skip control plane readiness check in test environment - if os.Getenv("TEST_ENV") != "true" { - log.Info("Waiting for control plane to be ready") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, - eksbootstrapv1.DataSecretGenerationFailedReason, - clusterv1.ConditionSeverityInfo, "Control plane is not initialized yet") - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil - } - log.Info("Skipping control plane readiness check in test environment") + log.Info("Waiting for control plane to be ready") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, + eksbootstrapv1.DataSecretGenerationFailedReason, + clusterv1.ConditionSeverityInfo, "Control plane is not initialized yet") + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } log.Info("Control plane is ready, proceeding with userdata generation") @@ -236,27 +231,21 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust nodeInput.FeatureGates = config.Spec.FeatureGates } - // In test environments, provide a mock CA certificate - if os.Getenv("TEST_ENV") == "true" { - log.Info("Using mock CA certificate for test environment") - nodeInput.CACert = "mock-ca-certificate-for-testing" - } else { - // Fetch CA cert from KubeConfig secret - obj := client.ObjectKey{ - Namespace: cluster.Namespace, - Name: cluster.Name, - } - ca, err := extractCAFromSecret(ctx, r.Client, obj) - if err != nil { - log.Error(err, "Failed to extract CA from kubeconfig 
secret") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, - eksbootstrapv1.DataSecretGenerationFailedReason, - clusterv1.ConditionSeverityWarning, - "Failed to extract CA from kubeconfig secret: %v", err) - return ctrl.Result{}, err - } - nodeInput.CACert = ca + // Fetch CA cert from KubeConfig secret + obj := client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + } + ca, err := extractCAFromSecret(ctx, r.Client, obj) + if err != nil { + log.Error(err, "Failed to extract CA from kubeconfig secret") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, + eksbootstrapv1.DataSecretGenerationFailedReason, + clusterv1.ConditionSeverityWarning, + "Failed to extract CA from kubeconfig secret: %v", err) + return ctrl.Result{}, err } + nodeInput.CACert = ca // Get AMI ID and capacity type from owner resource switch configOwner.GetKind() { @@ -350,7 +339,7 @@ func (r *NodeadmConfigReconciler) storeBootstrapData(ctx context.Context, cluste } } - config.Status.DataSecretName = ptr.To[string](secret.Name) + config.Status.DataSecretName = ptr.To(secret.Name) config.Status.Ready = true conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition) return nil diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go index 16a6b19f9e..433697c791 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller_reconciler_test.go @@ -19,6 +19,7 @@ package controllers import ( "fmt" "testing" + "time" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -27,47 +28,66 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - // ekscontrolplanev1 is registered in suite_test; we don't reference it directly here. 
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) func TestNodeadmConfigReconciler_CreateSecret(t *testing.T) { - t.Setenv("TEST_ENV", "true") g := NewWithT(t) amcp := newAMCP("test-cluster") - // ensure APIServerEndpoint is set for nodeadm input validation - amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://1.2.3.4"} + endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443} + amcp.Spec.ControlPlaneEndpoint = endpoint cluster := newCluster(amcp.Name) + newStatus := cluster.Status + amcpStatus := amcp.Status + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + g.Expect(testEnv.Client.Create(ctx, cluster)).To(Succeed()) + cluster.Status = newStatus + g.Expect(testEnv.Client.Status().Update(ctx, cluster)).To(Succeed()) + amcp.Status = amcpStatus + g.Expect(testEnv.Client.Status().Update(ctx, amcp)).To(Succeed()) + kubeconfigSecret := newKubeconfigSecret("https://9.9.9.9:6443", cluster) + g.Expect(testEnv.Client.Create(ctx, kubeconfigSecret)).To(Succeed()) + machine := newMachine(cluster, "test-machine") cfg := newNodeadmConfig(machine) - - g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + g.Expect(testEnv.Client.Create(ctx, cfg)).To(Succeed()) reconciler := NodeadmConfigReconciler{Client: testEnv.Client} g.Eventually(func(gomega Gomega) { _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) gomega.Expect(err).NotTo(HaveOccurred()) - }).Should(Succeed()) + }, time.Second*15, time.Second*5).Should(Succeed()) secret := &corev1.Secret{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) g.Expect(string(secret.Data["value"])).To(ContainSubstring("apiVersion: node.eks.aws/v1alpha1")) - g.Expect(string(secret.Data["value"])).To(ContainSubstring("apiServerEndpoint: 
https://1.2.3.4")) + g.Expect(string(secret.Data["value"])).To(ContainSubstring("apiServerEndpoint: https://9.9.9.9")) } func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) { - t.Setenv("TEST_ENV", "true") g := NewWithT(t) amcp := newAMCP("test-cluster") - amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://5.6.7.8"} + endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443} + amcp.Spec.ControlPlaneEndpoint = endpoint cluster := newCluster(amcp.Name) + newStatus := cluster.Status + amcpStatus := amcp.Status + g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + g.Expect(testEnv.Client.Create(ctx, cluster)).To(Succeed()) + cluster.Status = newStatus + g.Expect(testEnv.Client.Status().Update(ctx, cluster)).To(Succeed()) + amcp.Status = amcpStatus + g.Expect(testEnv.Client.Status().Update(ctx, amcp)).To(Succeed()) + kubeconfigSecret := newKubeconfigSecret("https://9.9.9.9:6443", cluster) + g.Expect(testEnv.Client.Create(ctx, kubeconfigSecret)).To(Succeed()) + mp := newMachinePool(cluster, "test-mp") cfg := newNodeadmConfig(nil) cfg.ObjectMeta.Name = mp.Name @@ -79,24 +99,21 @@ func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) { UID: types.UID(fmt.Sprintf("%s uid", mp.Name)), }} cfg.Status.DataSecretName = &mp.Name - // initial kubelet flags cfg.Spec.Kubelet = &eksbootstrapv1.KubeletOptions{Flags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}} - g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) - reconciler := NodeadmConfigReconciler{Client: testEnv.Client} // first reconcile creates secret g.Eventually(func(gomega Gomega) { _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("MachinePool")) gomega.Expect(err).NotTo(HaveOccurred()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) secret := &corev1.Secret{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: 
"default"}, secret)).To(Succeed()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) oldData := append([]byte(nil), secret.Data["value"]...) // change flags to force different userdata @@ -105,54 +122,35 @@ func TestNodeadmConfigReconciler_UpdateSecret_ForMachinePool(t *testing.T) { g.Eventually(func(gomega Gomega) { _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("MachinePool")) gomega.Expect(err).NotTo(HaveOccurred()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) gomega.Expect(secret.Data["value"]).NotTo(Equal(oldData)) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) } -func TestNodeadmConfigReconciler_DoesNotUpdate_ForMachineOwner(t *testing.T) { - t.Setenv("TEST_ENV", "true") +func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { g := NewWithT(t) amcp := newAMCP("test-cluster") - amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://9.9.9.9"} + endpoint := clusterv1.APIEndpoint{Host: "https://9.9.9.9", Port: 6443} + amcp.Spec.ControlPlaneEndpoint = endpoint cluster := newCluster(amcp.Name) - machine := newMachine(cluster, "test-machine") - cfg := newNodeadmConfig(machine) - + newStatus := cluster.Status + amcpStatus := amcp.Status g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) + g.Expect(testEnv.Client.Create(ctx, cluster)).To(Succeed()) + cluster.Status = newStatus + g.Expect(testEnv.Client.Status().Update(ctx, cluster)).To(Succeed()) + amcp.Status = amcpStatus + g.Expect(testEnv.Client.Status().Update(ctx, amcp)).To(Succeed()) + kubeconfigSecret := newKubeconfigSecret("https://9.9.9.9:6443", cluster) + g.Expect(testEnv.Client.Create(ctx, kubeconfigSecret)).To(Succeed()) - // pre-create secret with placeholder data - pre := &corev1.Secret{ObjectMeta: 
metav1.ObjectMeta{Namespace: "default", Name: machine.Name}} - g.Expect(testEnv.Client.Create(ctx, pre)).To(Succeed()) - - reconciler := NodeadmConfigReconciler{Client: testEnv.Client} - g.Eventually(func(gomega Gomega) { - _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) - gomega.Expect(err).NotTo(HaveOccurred()) - }).Should(Succeed()) - - // secret should exist but not be updated from placeholder - secret := &corev1.Secret{} - g.Eventually(func(gomega Gomega) { - gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: "default"}, secret)).To(Succeed()) - gomega.Expect(secret.Data["value"]).To(BeNil()) - }).Should(Succeed()) -} - -func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { - t.Setenv("TEST_ENV", "true") - g := NewWithT(t) - - amcp := newAMCP("test-cluster") - amcp.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "https://3.3.3.3"} //nolint:gosec // test constant secretPath := "/etc/secret.txt" secretContent := "secretValue" - cluster := newCluster(amcp.Name) machine := newMachine(cluster, "test-machine") cfg := newNodeadmConfig(machine) cfg.Spec.Files = append(cfg.Spec.Files, eksbootstrapv1.File{ @@ -168,9 +166,7 @@ func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { } g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed()) - g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) - // expected minimal presence check expectedContains := []string{ "#cloud-config", secretContent, @@ -180,12 +176,12 @@ func TestNodeadmConfigReconciler_ResolvesSecretFileReference(t *testing.T) { g.Eventually(func(gomega Gomega) { _, err := reconciler.joinWorker(ctx, cluster, cfg, configOwner("Machine")) gomega.Expect(err).NotTo(HaveOccurred()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) got := &corev1.Secret{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{Name: cfg.Name, Namespace: 
"default"}, got)).To(Succeed()) - }).Should(Succeed()) + }, time.Minute, time.Second*5).Should(Succeed()) for _, s := range expectedContains { g.Expect(string(got.Data["value"])).To(ContainSubstring(s), "userdata should contain %q", s) diff --git a/bootstrap/eks/controllers/suite_test.go b/bootstrap/eks/controllers/suite_test.go index 2b61ab258a..2b88d332ee 100644 --- a/bootstrap/eks/controllers/suite_test.go +++ b/bootstrap/eks/controllers/suite_test.go @@ -26,6 +26,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports + eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" ) @@ -43,6 +44,7 @@ func TestMain(m *testing.M) { func setup() { utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) + utilruntime.Must(eksbootstrapv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index c13a09d1c1..d995df0d31 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -173,14 +173,14 @@ func TestNodeadmUserdata(t *testing.T) { Raw: []byte(` evictionHard: memory.available: "2000Mi" - `), +`), }, }, }, expectErr: false, verifyOutput: func(output string) bool { return strings.Contains(output, "evictionHard:") && - strings.Contains(output, "memory.available: \"2000Mi\"") && + strings.Contains(output, "memory.available: 2000Mi") && strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") }, }, @@ -549,7 +549,7 @@ evictionHard: t.Run(testcase.name, func(t *testing.T) { bytes, err := NewNodeadmUserdata(testcase.args.input) if testcase.expectErr { - g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(HaveOccurred(), 
"got error when exepcted none", err) return } From f6d79e2f1bb3b8c1b7f7bda5ce3f4627c3128fd9 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 26 Sep 2025 11:04:43 -0700 Subject: [PATCH 16/18] fix: we don't need to set eks specific labels --- .../controllers/nodeadmconfig_controller.go | 41 ---- bootstrap/eks/internal/userdata/nodeadm.go | 83 +------- .../eks/internal/userdata/nodeadm_test.go | 190 +----------------- 3 files changed, 10 insertions(+), 304 deletions(-) diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index f974880728..fb45ad5885 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -21,11 +21,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" - infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -213,7 +211,6 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust Files: files, ServiceCIDR: serviceCIDR, APIServerEndpoint: controlPlane.Spec.ControlPlaneEndpoint.Host, - NodeGroupName: config.Name, } if config.Spec.Kubelet != nil { nodeInput.KubeletFlags = config.Spec.Kubelet.Flags @@ -247,44 +244,6 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust } nodeInput.CACert = ca - // Get AMI ID and capacity type from owner resource - switch configOwner.GetKind() { - case "AWSManagedMachinePool": - amp := 
&expinfrav1.AWSManagedMachinePool{} - if err := r.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: configOwner.GetName()}, amp); err == nil { - log.Info("Found AWSManagedMachinePool", "name", amp.Name, "launchTemplate", amp.Spec.AWSLaunchTemplate != nil) - if amp.Spec.AWSLaunchTemplate != nil && amp.Spec.AWSLaunchTemplate.AMI.ID != nil { - nodeInput.AMIImageID = *amp.Spec.AWSLaunchTemplate.AMI.ID - log.Info("Set AMI ID from AWSManagedMachinePool launch template", "amiID", nodeInput.AMIImageID) - } else { - log.Info("No AMI ID found in AWSManagedMachinePool launch template") - } - if amp.Spec.CapacityType != nil { - nodeInput.CapacityType = amp.Spec.CapacityType - log.Info("Set capacity type from AWSManagedMachinePool", "capacityType", *amp.Spec.CapacityType) - } else { - log.Info("No capacity type found in AWSManagedMachinePool") - } - } else { - log.Info("Failed to get AWSManagedMachinePool", "error", err) - } - case "AWSMachineTemplate": - awsmt := &infrav1.AWSMachineTemplate{} - var awsMTGetErr error - if awsMTGetErr = r.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: configOwner.GetName()}, awsmt); awsMTGetErr == nil { - log.Info("Found AWSMachineTemplate", "name", awsmt.Name) - if awsmt.Spec.Template.Spec.AMI.ID != nil { - nodeInput.AMIImageID = *awsmt.Spec.Template.Spec.AMI.ID - log.Info("Set AMI ID from AWSMachineTemplate", "amiID", nodeInput.AMIImageID) - } else { - log.Info("No AMI ID found in AWSMachineTemplate") - } - } - log.Info("Failed to get AWSMachineTemplate", "error", awsMTGetErr) - default: - log.Info("Config owner kind not recognized for AMI extraction", "kind", configOwner.GetKind()) - } - log.Info("Generating nodeadm userdata", "cluster", controlPlane.Spec.EKSClusterName, "endpoint", nodeInput.APIServerEndpoint) diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index 79ace56607..19b52df1ed 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ 
b/bootstrap/eks/internal/userdata/nodeadm.go @@ -19,14 +19,12 @@ package userdata import ( "bytes" "fmt" - "strings" "text/template" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) const ( @@ -95,14 +93,16 @@ spec: {{$k}}: {{$v}} {{- end }} {{- end }} +{{- if .KubeletConfig }} kubelet: - {{- if .KubeletConfig }} config: {{ Indent 6 (toYaml .KubeletConfig) }} {{- end }} + {{- if .KubeletFlags }} flags: - {{- range $flag := .KubeletFlags }} - - "{{$flag}}" + {{- range $flag := .KubeletFlags }} + - "{{$flag}}" + {{- end }} {{- end }} {{- if or .ContainerdConfig .ContainerdBaseRuntimeSpec }} containerd: @@ -133,10 +133,6 @@ spec: {{- end }} --{{.Boundary}}` - - nodeLabelImage = "eks.amazonaws.com/nodegroup-image" - nodeLabelNodeGroup = "eks.amazonaws.com/nodegroup" - nodeLabelCapacityType = "eks.amazonaws.com/capacityType" ) // NodeadmInput contains all the information required to generate user data for a node. 
@@ -160,68 +156,8 @@ type NodeadmInput struct { APIServerEndpoint string Boundary string CACert string - CapacityType *v1beta2.ManagedMachinePoolCapacityType ServiceCIDR string // Service CIDR range for the cluster ClusterDNS string - NodeGroupName string - NodeLabels string // Not exposed in CRD, computed from user input -} - -func (input *NodeadmInput) setKubeletFlags() error { - var nodeLabels string - newFlags := []string{} - for _, flag := range input.KubeletFlags { - if strings.HasPrefix(flag, "--node-labels=") { - nodeLabels = strings.TrimPrefix(flag, "--node-labels=") - } else { - newFlags = append(newFlags, flag) - } - } - labelsMap := make(map[string]string) - if nodeLabels != "" { - labels := strings.Split(nodeLabels, ",") - for _, label := range labels { - labelSplit := strings.Split(label, "=") - if len(labelSplit) != 2 { - return fmt.Errorf("invalid label: %s", label) - } - labelKey := labelSplit[0] - labelValue := labelSplit[1] - labelsMap[labelKey] = labelValue - } - } - if _, ok := labelsMap[nodeLabelImage]; !ok && input.AMIImageID != "" { - labelsMap[nodeLabelImage] = input.AMIImageID - } - if _, ok := labelsMap[nodeLabelNodeGroup]; !ok && input.NodeGroupName != "" { - labelsMap[nodeLabelNodeGroup] = input.NodeGroupName - } - if _, ok := labelsMap[nodeLabelCapacityType]; !ok { - labelsMap[nodeLabelCapacityType] = input.getCapacityTypeString() - } - stringBuilder := strings.Builder{} - for key, value := range labelsMap { - stringBuilder.WriteString(fmt.Sprintf("%s=%s,", key, value)) - } - newLabels := stringBuilder.String()[:len(stringBuilder.String())-1] // remove the last comma - newFlags = append(newFlags, fmt.Sprintf("--node-labels=%s", newLabels)) - input.KubeletFlags = newFlags - return nil -} - -// getCapacityTypeString returns the string representation of the capacity type. 
-func (input *NodeadmInput) getCapacityTypeString() string { - if input.CapacityType == nil { - return "ON_DEMAND" - } - switch *input.CapacityType { - case v1beta2.ManagedMachinePoolCapacityTypeSpot: - return "SPOT" - case v1beta2.ManagedMachinePoolCapacityTypeOnDemand: - return "ON_DEMAND" - default: - return strings.ToUpper(string(*input.CapacityType)) - } } // validateNodeInput validates the input for nodeadm user data generation. @@ -235,18 +171,11 @@ func validateNodeadmInput(input *NodeadmInput) error { if input.ClusterName == "" { return fmt.Errorf("cluster name is required for nodeadm") } - if input.NodeGroupName == "" { - return fmt.Errorf("node group name is required for nodeadm") - } if input.Boundary == "" { input.Boundary = boundary } - err := input.setKubeletFlags() - if err != nil { - return err - } - klog.V(2).Infof("Nodeadm Userdata Generation - node-labels: %s", input.NodeLabels) + klog.V(2).Infof("Nodeadm Userdata Generation %v", input) return nil } diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index d995df0d31..b490e60fe8 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -27,83 +27,8 @@ import ( "k8s.io/utils/ptr" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) -func TestSetKubeletFlags(t *testing.T) { - g := NewWithT(t) - - tests := []struct { - name string - in *NodeadmInput - wantNodeLabels []string - wantOtherFlags []string - }{ - { - name: "empty kubelet flags", - in: &NodeadmInput{}, - wantNodeLabels: []string{"eks.amazonaws.com/capacityType=ON_DEMAND"}, - wantOtherFlags: nil, - }, - { - name: "unrelated kubelet flag preserved", - in: &NodeadmInput{ - KubeletFlags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}, - }, - wantNodeLabels: []string{"eks.amazonaws.com/capacityType=ON_DEMAND"}, - 
wantOtherFlags: []string{"--register-with-taints=dedicated=infra:NoSchedule"}, - }, - { - name: "existing node-labels augmented", - in: &NodeadmInput{ - KubeletFlags: []string{"--node-labels=app=foo"}, - AMIImageID: "ami-12345", - NodeGroupName: "ng-1", - }, - wantNodeLabels: []string{ - "app=foo", - "eks.amazonaws.com/nodegroup-image=ami-12345", - "eks.amazonaws.com/nodegroup=ng-1", - "eks.amazonaws.com/capacityType=ON_DEMAND", - }, - wantOtherFlags: nil, - }, - { - name: "existing eks-specific labels present", - in: &NodeadmInput{ - KubeletFlags: []string{"--node-labels=app=foo,eks.amazonaws.com/nodegroup=ng-1,eks.amazonaws.com/nodegroup-image=ami-12345,eks.amazonaws.com/capacityType=SPOT"}, - AMIImageID: "ami-12345", - NodeGroupName: "ng-1", - }, - wantNodeLabels: []string{ - "app=foo", - "eks.amazonaws.com/nodegroup=ng-1", - "eks.amazonaws.com/nodegroup-image=ami-12345", - "eks.amazonaws.com/capacityType=SPOT", - }, - wantOtherFlags: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.in.setKubeletFlags() - var gotNodeLabels []string - var gotOtherFlags []string - for _, flag := range tt.in.KubeletFlags { - if strings.HasPrefix(flag, "--node-labels=") { - labels := strings.TrimPrefix(flag, "--node-labels=") - gotNodeLabels = append(gotNodeLabels, strings.Split(labels, ",")...) 
- } else { - gotOtherFlags = append(gotOtherFlags, flag) - } - } - g.Expect(gotNodeLabels).To(ContainElements(tt.wantNodeLabels), "expected node-labels to contain %v, got %v", tt.wantNodeLabels, gotNodeLabels) - g.Expect(gotOtherFlags).To(ContainElements(tt.wantOtherFlags), "expected kubelet flags to contain %v, got %v", tt.wantOtherFlags, gotOtherFlags) - }) - } -} - func TestNodeadmUserdata(t *testing.T) { format.TruncatedDiff = false g := NewWithT(t) @@ -112,9 +37,6 @@ func TestNodeadmUserdata(t *testing.T) { input *NodeadmInput } - onDemandCapacity := v1beta2.ManagedMachinePoolCapacityTypeOnDemand - spotCapacity := v1beta2.ManagedMachinePoolCapacityTypeSpot - tests := []struct { name string args args @@ -128,7 +50,6 @@ func TestNodeadmUserdata(t *testing.T) { ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", }, }, expectErr: false, @@ -147,7 +68,6 @@ func TestNodeadmUserdata(t *testing.T) { ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", KubeletFlags: []string{ "--node-labels=node-role.undistro.io/infra=true", "--register-with-taints=dedicated=infra:NoSchedule", @@ -168,7 +88,6 @@ func TestNodeadmUserdata(t *testing.T) { ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", KubeletConfig: &runtime.RawExtension{ Raw: []byte(` evictionHard: @@ -191,7 +110,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", PreBootstrapCommands: []string{ "echo 'pre-bootstrap'", "yum install -y htop", @@ -213,7 +131,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://test-endpoint.eks.amazonaws.com", CACert: "test-cert", - NodeGroupName: "test-nodegroup", AMIImageID: "ami-123456", ServiceCIDR: "192.168.0.0/16", }, @@ -221,7 
+138,6 @@ evictionHard: expectErr: false, verifyOutput: func(output string) bool { return strings.Contains(output, "cidr: 192.168.0.0/16") && - strings.Contains(output, "nodegroup-image=ami-123456") && strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") }, }, @@ -232,7 +148,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", NTP: &eksbootstrapv1.NTP{ Enabled: ptr.To(true), Servers: []string{"time.google.com"}, @@ -254,7 +169,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", Users: []eksbootstrapv1.User{ { Name: "testuser", @@ -278,7 +192,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", DiskSetup: &eksbootstrapv1.DiskSetup{ Filesystems: []eksbootstrapv1.Filesystem{ { @@ -306,7 +219,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", Mounts: []eksbootstrapv1.MountPoints{ {"/dev/disk/scsi1/lun0"}, {"/mnt/etcd"}, @@ -329,7 +241,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", Boundary: "CUSTOMBOUNDARY123", PreBootstrapCommands: []string{"echo 'pre-bootstrap'"}, NTP: &eksbootstrapv1.NTP{ @@ -357,7 +268,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", }, }, expectErr: false, @@ -378,7 +288,6 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", PreBootstrapCommands: []string{"echo 'test'"}, NTP: &eksbootstrapv1.NTP{ Enabled: ptr.To(true), @@ -393,83 +302,10 @@ evictionHard: 
strings.Contains(output, "Content-Type: application/node.eks.aws") && strings.Contains(output, "Content-Type: text/x-shellscript") && strings.Contains(output, "Content-Type: text/cloud-config") && + strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") && strings.Count(output, fmt.Sprintf("--%s", boundary)) == 5 // 3 parts * 2 boundaries each except cloud-config }, }, - { - name: "node-labels without capacityType - should add ON_DEMAND", - args: args{ - input: &NodeadmInput{ - ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", - AMIImageID: "ami-123456", - KubeletFlags: []string{ - "--node-labels=app=my-app,environment=production", - }, - CapacityType: nil, // Should default to ON_DEMAND - }, - }, - expectErr: false, - verifyOutput: func(output string) bool { - return strings.Contains(output, "app=my-app") && - strings.Contains(output, "environment=production") && - strings.Contains(output, "eks.amazonaws.com/capacityType=ON_DEMAND") && - strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-123456") && - strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && - strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") - }, - }, - { - name: "node-labels with capacityType set to SPOT", - args: args{ - input: &NodeadmInput{ - ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", - AMIImageID: "ami-123456", - KubeletFlags: []string{ - "--node-labels=workload=batch", - }, - CapacityType: &spotCapacity, - }, - }, - expectErr: false, - verifyOutput: func(output string) bool { - return strings.Contains(output, "workload=batch") && - strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-123456") && - strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && - strings.Contains(output, "eks.amazonaws.com/capacityType=SPOT") && - 
strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") - }, - }, - { - name: "no existing node-labels - should only add generated labels", - args: args{ - input: &NodeadmInput{ - ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", - AMIImageID: "ami-789012", - KubeletFlags: []string{ - "--max-pods=100", - }, - CapacityType: &spotCapacity, - }, - }, - expectErr: false, - verifyOutput: func(output string) bool { - return strings.Contains(output, "--node-labels") && - strings.Contains(output, "eks.amazonaws.com/nodegroup-image=ami-789012") && - strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && - strings.Contains(output, "eks.amazonaws.com/capacityType=SPOT") && - strings.Contains(output, `"--max-pods=100"`) && - strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") - }, - }, { name: "verify other kubelet flags are preserved with node-labels", args: args{ @@ -477,21 +313,17 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - NodeGroupName: "test-nodegroup", KubeletFlags: []string{ "--node-labels=tier=workers", "--register-with-taints=dedicated=gpu:NoSchedule", "--max-pods=58", }, - CapacityType: &onDemandCapacity, }, }, expectErr: false, verifyOutput: func(output string) bool { return strings.Contains(output, "--node-labels") && strings.Contains(output, "tier=workers") && - strings.Contains(output, "eks.amazonaws.com/nodegroup=test-nodegroup") && - strings.Contains(output, "eks.amazonaws.com/capacityType=ON_DEMAND") && strings.Contains(output, `"--register-with-taints=dedicated=gpu:NoSchedule"`) && strings.Contains(output, `"--max-pods=58"`) && strings.Contains(output, "apiVersion: node.eks.aws/v1alpha1") @@ -511,9 +343,8 @@ evictionHard: name: "missing API server endpoint", args: args{ input: &NodeadmInput{ - ClusterName: "test-cluster", - CACert: "test-ca-cert", - NodeGroupName: 
"test-nodegroup", + ClusterName: "test-cluster", + CACert: "test-ca-cert", // Missing APIServerEndpoint }, }, @@ -525,31 +356,18 @@ evictionHard: input: &NodeadmInput{ ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", - NodeGroupName: "test-nodegroup", // Missing CACert }, }, expectErr: true, }, - { - name: "missing node group name", - args: args{ - input: &NodeadmInput{ - ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - // Missing NodeGroupName - }, - }, - expectErr: true, - }, } for _, testcase := range tests { t.Run(testcase.name, func(t *testing.T) { bytes, err := NewNodeadmUserdata(testcase.args.input) if testcase.expectErr { - g.Expect(err).To(HaveOccurred(), "got error when exepcted none", err) + g.Expect(err).To(HaveOccurred()) return } From 3537aa5a37e4c429f69741fe3900cf79ec4d0622 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 26 Sep 2025 11:23:30 -0700 Subject: [PATCH 17/18] fix: removes local store options which isn't supported by capa --- .../eks/api/v1beta2/nodeadmconfig_types.go | 51 ------------------- .../eks/api/v1beta2/zz_generated.deepcopy.go | 45 ---------------- .../controllers/nodeadmconfig_controller.go | 1 - bootstrap/eks/internal/userdata/nodeadm.go | 1 - ...strap.cluster.x-k8s.io_nodeadmconfigs.yaml | 37 -------------- ...uster.x-k8s.io_nodeadmconfigtemplates.yaml | 37 -------------- 6 files changed, 172 deletions(-) diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go index 5d7a3884a2..ac1e4c288e 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -17,10 +17,6 @@ type NodeadmConfigSpec struct { // +optional Containerd *ContainerdOptions `json:"containerd,omitempty"` - // Instance contains options for the node's operating system and devices. 
- // +optional - Instance *InstanceOptions `json:"instance,omitempty"` - // FeatureGates holds key-value pairs to enable or disable application features. // +optional FeatureGates map[Feature]bool `json:"featureGates,omitempty"` @@ -74,29 +70,6 @@ type ContainerdOptions struct { BaseRuntimeSpec *runtime.RawExtension `json:"baseRuntimeSpec,omitempty"` } -// InstanceOptions determines how the node's operating system and devices are configured. -type InstanceOptions struct { - // LocalStorage contains options for configuring EC2 instance stores. - // +optional - LocalStorage *LocalStorageOptions `json:"localStorage,omitempty"` -} - -// LocalStorageOptions control how EC2 instance stores are used when available. -type LocalStorageOptions struct { - // Strategy specifies how to handle an instance's local storage devices. - Strategy LocalStorageStrategy `json:"strategy"` - - // MountPath is the path where the filesystem will be mounted. - // Defaults to "/mnt/k8s-disks/". - // +optional - MountPath string `json:"mountPath,omitempty"` - - // DisabledMounts is a list of directories that will not be mounted to LocalStorage. - // By default, all mounts are enabled. - // +optional - DisabledMounts []DisabledMount `json:"disabledMounts,omitempty"` -} - // Feature specifies which feature gate should be toggled. // +kubebuilder:validation:Enum=InstanceIdNodeName;FastImagePull type Feature string @@ -108,30 +81,6 @@ const ( FeatureFastImagePull Feature = "FastImagePull" ) -// LocalStorageStrategy specifies how to handle an instance's local storage devices. 
-// +kubebuilder:validation:Enum=RAID0;RAID10;Mount -type LocalStorageStrategy string - -const ( - // RAID0Strategy is a local storage strategy for EKS nodes - RAID0Strategy LocalStorageStrategy = "RAID0" - // RAID10Strategy is a local storage strategy for EKS nodes - RAID10Strategy LocalStorageStrategy = "RAID10" - // MountStrategy is a local storage strategy for EKS nodes - MountStrategy LocalStorageStrategy = "Mount" -) - -// DisabledMount specifies a directory that should not be mounted onto local storage. -// +kubebuilder:validation:Enum=Containerd;PodLogs -type DisabledMount string - -const ( - // DisabledMountContainerd refers to /var/lib/containerd - DisabledMountContainerd DisabledMount = "Containerd" - // DisabledMountPodLogs refers to /var/log/pods - DisabledMountPodLogs DisabledMount = "PodLogs" -) - // GetConditions returns the observations of the operational state of the NodeadmConfig resource. func (r *NodeadmConfig) GetConditions() clusterv1.Conditions { return r.Status.Conditions diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index fad9601e68..31907681f0 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -423,26 +423,6 @@ func (in *Filesystem) DeepCopy() *Filesystem { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstanceOptions) DeepCopyInto(out *InstanceOptions) { - *out = *in - if in.LocalStorage != nil { - in, out := &in.LocalStorage, &out.LocalStorage - *out = new(LocalStorageOptions) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceOptions. 
-func (in *InstanceOptions) DeepCopy() *InstanceOptions { - if in == nil { - return nil - } - out := new(InstanceOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeletOptions) DeepCopyInto(out *KubeletOptions) { *out = *in @@ -468,26 +448,6 @@ func (in *KubeletOptions) DeepCopy() *KubeletOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalStorageOptions) DeepCopyInto(out *LocalStorageOptions) { - *out = *in - if in.DisabledMounts != nil { - in, out := &in.DisabledMounts, &out.DisabledMounts - *out = make([]DisabledMount, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalStorageOptions. -func (in *LocalStorageOptions) DeepCopy() *LocalStorageOptions { - if in == nil { - return nil - } - out := new(LocalStorageOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in MountPoints) DeepCopyInto(out *MountPoints) { { @@ -604,11 +564,6 @@ func (in *NodeadmConfigSpec) DeepCopyInto(out *NodeadmConfigSpec) { *out = new(ContainerdOptions) (*in).DeepCopyInto(*out) } - if in.Instance != nil { - in, out := &in.Instance, &out.Instance - *out = new(InstanceOptions) - (*in).DeepCopyInto(*out) - } if in.FeatureGates != nil { in, out := &in.FeatureGates, &out.FeatureGates *out = make(map[Feature]bool, len(*in)) diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index fb45ad5885..a62cf65cd0 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -202,7 +202,6 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust nodeInput := &userdata.NodeadmInput{ // AWSManagedControlPlane webhooks default and validate EKSClusterName ClusterName: controlPlane.Spec.EKSClusterName, - Instance: config.Spec.Instance, PreBootstrapCommands: config.Spec.PreBootstrapCommands, Users: config.Spec.Users, NTP: config.Spec.NTP, diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index 19b52df1ed..081b63b588 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -143,7 +143,6 @@ type NodeadmInput struct { ContainerdConfig string ContainerdBaseRuntimeSpec *runtime.RawExtension FeatureGates map[eksbootstrapv1.Feature]bool - Instance *eksbootstrapv1.InstanceOptions PreBootstrapCommands []string Files []eksbootstrapv1.File diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml index 152b539b33..9080b91f9c 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml @@ -192,43 +192,6 @@ spec: - path type: object type: array - 
instance: - description: Instance contains options for the node's operating system - and devices. - properties: - localStorage: - description: LocalStorage contains options for configuring EC2 - instance stores. - properties: - disabledMounts: - description: |- - DisabledMounts is a list of directories that will not be mounted to LocalStorage. - By default, all mounts are enabled. - items: - description: DisabledMount specifies a directory that should - not be mounted onto local storage. - enum: - - Containerd - - PodLogs - type: string - type: array - mountPath: - description: |- - MountPath is the path where the filesystem will be mounted. - Defaults to "/mnt/k8s-disks/". - type: string - strategy: - description: Strategy specifies how to handle an instance's - local storage devices. - enum: - - RAID0 - - RAID10 - - Mount - type: string - required: - - strategy - type: object - type: object kubelet: description: Kubelet contains options for kubelet. properties: diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml index 45489bfd99..433fee76d0 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml @@ -209,43 +209,6 @@ spec: - path type: object type: array - instance: - description: Instance contains options for the node's operating - system and devices. - properties: - localStorage: - description: LocalStorage contains options for configuring - EC2 instance stores. - properties: - disabledMounts: - description: |- - DisabledMounts is a list of directories that will not be mounted to LocalStorage. - By default, all mounts are enabled. - items: - description: DisabledMount specifies a directory - that should not be mounted onto local storage. 
- enum: - - Containerd - - PodLogs - type: string - type: array - mountPath: - description: |- - MountPath is the path where the filesystem will be mounted. - Defaults to "/mnt/k8s-disks/". - type: string - strategy: - description: Strategy specifies how to handle an instance's - local storage devices. - enum: - - RAID0 - - RAID10 - - Mount - type: string - required: - - strategy - type: object - type: object kubelet: description: Kubelet contains options for kubelet. properties: From c434fec74af4c71c76ad16bf0c78344151d565b8 Mon Sep 17 00:00:00 2001 From: faiq Date: Fri, 26 Sep 2025 12:00:53 -0700 Subject: [PATCH 18/18] fix: use prenodeadm commands name --- .../eks/api/v1beta2/nodeadmconfig_types.go | 4 ++-- .../eks/api/v1beta2/zz_generated.deepcopy.go | 4 ++-- .../controllers/nodeadmconfig_controller.go | 18 ++++++++--------- bootstrap/eks/internal/userdata/nodeadm.go | 16 +++++++-------- .../eks/internal/userdata/nodeadm_test.go | 20 +++++++++---------- ...strap.cluster.x-k8s.io_nodeadmconfigs.yaml | 12 +++++------ ...uster.x-k8s.io_nodeadmconfigtemplates.yaml | 12 +++++------ .../managed/eks_upgrade_to_nodeadm_test.go | 2 +- 8 files changed, 44 insertions(+), 44 deletions(-) diff --git a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go index ac1e4c288e..52786dc286 100644 --- a/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go +++ b/bootstrap/eks/api/v1beta2/nodeadmconfig_types.go @@ -21,9 +21,9 @@ type NodeadmConfigSpec struct { // +optional FeatureGates map[Feature]bool `json:"featureGates,omitempty"` - // PreBootstrapCommands specifies extra commands to run before bootstrapping nodes. + // PreNodeadmCommands specifies extra commands to run before bootstrapping nodes. // +optional - PreBootstrapCommands []string `json:"preBootstrapCommands,omitempty"` + PreNodeadmCommands []string `json:"PreNodeadmCommands,omitempty"` // Files specifies extra files to be passed to user_data upon creation. 
// +optional diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index 31907681f0..a60d606991 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -571,8 +571,8 @@ func (in *NodeadmConfigSpec) DeepCopyInto(out *NodeadmConfigSpec) { (*out)[key] = val } } - if in.PreBootstrapCommands != nil { - in, out := &in.PreBootstrapCommands, &out.PreBootstrapCommands + if in.PreNodeadmCommands != nil { + in, out := &in.PreNodeadmCommands, &out.PreNodeadmCommands *out = make([]string, len(*in)) copy(*out, *in) } diff --git a/bootstrap/eks/controllers/nodeadmconfig_controller.go b/bootstrap/eks/controllers/nodeadmconfig_controller.go index a62cf65cd0..48c5de256e 100644 --- a/bootstrap/eks/controllers/nodeadmconfig_controller.go +++ b/bootstrap/eks/controllers/nodeadmconfig_controller.go @@ -201,15 +201,15 @@ func (r *NodeadmConfigReconciler) joinWorker(ctx context.Context, cluster *clust } nodeInput := &userdata.NodeadmInput{ // AWSManagedControlPlane webhooks default and validate EKSClusterName - ClusterName: controlPlane.Spec.EKSClusterName, - PreBootstrapCommands: config.Spec.PreBootstrapCommands, - Users: config.Spec.Users, - NTP: config.Spec.NTP, - DiskSetup: config.Spec.DiskSetup, - Mounts: config.Spec.Mounts, - Files: files, - ServiceCIDR: serviceCIDR, - APIServerEndpoint: controlPlane.Spec.ControlPlaneEndpoint.Host, + ClusterName: controlPlane.Spec.EKSClusterName, + PreNodeadmCommands: config.Spec.PreNodeadmCommands, + Users: config.Spec.Users, + NTP: config.Spec.NTP, + DiskSetup: config.Spec.DiskSetup, + Mounts: config.Spec.Mounts, + Files: files, + ServiceCIDR: serviceCIDR, + APIServerEndpoint: controlPlane.Spec.ControlPlaneEndpoint.Host, } if config.Spec.Kubelet != nil { nodeInput.KubeletFlags = config.Spec.Kubelet.Flags diff --git a/bootstrap/eks/internal/userdata/nodeadm.go b/bootstrap/eks/internal/userdata/nodeadm.go index 
081b63b588..264469bb0e 100644 --- a/bootstrap/eks/internal/userdata/nodeadm.go +++ b/bootstrap/eks/internal/userdata/nodeadm.go @@ -68,7 +68,7 @@ Content-Disposition: attachment; filename="commands.sh" set -o errexit set -o pipefail set -o nounset -{{- range .PreBootstrapCommands}} +{{- range .PreNodeadmCommands}} {{.}} {{- end}} --{{ .Boundary }}` @@ -144,12 +144,12 @@ type NodeadmInput struct { ContainerdBaseRuntimeSpec *runtime.RawExtension FeatureGates map[eksbootstrapv1.Feature]bool - PreBootstrapCommands []string - Files []eksbootstrapv1.File - DiskSetup *eksbootstrapv1.DiskSetup - Mounts []eksbootstrapv1.MountPoints - Users []eksbootstrapv1.User - NTP *eksbootstrapv1.NTP + PreNodeadmCommands []string + Files []eksbootstrapv1.File + DiskSetup *eksbootstrapv1.DiskSetup + Mounts []eksbootstrapv1.MountPoints + Users []eksbootstrapv1.User + NTP *eksbootstrapv1.NTP AMIImageID string APIServerEndpoint string @@ -193,7 +193,7 @@ func NewNodeadmUserdata(input *NodeadmInput) ([]byte, error) { } // Write shell script part if needed - if len(input.PreBootstrapCommands) > 0 { + if len(input.PreNodeadmCommands) > 0 { shellScriptTemplate := template.Must(template.New("shell").Parse(shellScriptPartTemplate)) if err := shellScriptTemplate.Execute(&buf, input); err != nil { return nil, fmt.Errorf("failed to execute shell script template: %v", err) diff --git a/bootstrap/eks/internal/userdata/nodeadm_test.go b/bootstrap/eks/internal/userdata/nodeadm_test.go index b490e60fe8..25e176a12d 100644 --- a/bootstrap/eks/internal/userdata/nodeadm_test.go +++ b/bootstrap/eks/internal/userdata/nodeadm_test.go @@ -110,7 +110,7 @@ evictionHard: ClusterName: "test-cluster", APIServerEndpoint: "https://example.com", CACert: "test-ca-cert", - PreBootstrapCommands: []string{ + PreNodeadmCommands: []string{ "echo 'pre-bootstrap'", "yum install -y htop", }, @@ -238,11 +238,11 @@ evictionHard: name: "boundary verification - all three parts with custom boundary", args: args{ input: &NodeadmInput{ 
- ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - Boundary: "CUSTOMBOUNDARY123", - PreBootstrapCommands: []string{"echo 'pre-bootstrap'"}, + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + Boundary: "CUSTOMBOUNDARY123", + PreNodeadmCommands: []string{"echo 'pre-bootstrap'"}, NTP: &eksbootstrapv1.NTP{ Enabled: ptr.To(true), Servers: []string{"time.google.com"}, @@ -285,10 +285,10 @@ evictionHard: name: "boundary verification - all 3 parts", args: args{ input: &NodeadmInput{ - ClusterName: "test-cluster", - APIServerEndpoint: "https://example.com", - CACert: "test-ca-cert", - PreBootstrapCommands: []string{"echo 'test'"}, + ClusterName: "test-cluster", + APIServerEndpoint: "https://example.com", + CACert: "test-ca-cert", + PreNodeadmCommands: []string{"echo 'test'"}, NTP: &eksbootstrapv1.NTP{ Enabled: ptr.To(true), Servers: []string{"time.google.com"}, diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml index 9080b91f9c..5c3c4e9605 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigs.yaml @@ -39,6 +39,12 @@ spec: spec: description: NodeadmConfigSpec defines the desired state of NodeadmConfig. properties: + PreNodeadmCommands: + description: PreNodeadmCommands specifies extra commands to run before + bootstrapping nodes. + items: + type: string + type: array containerd: description: Containerd contains options for containerd. properties: @@ -227,12 +233,6 @@ spec: type: string type: array type: object - preBootstrapCommands: - description: PreBootstrapCommands specifies extra commands to run - before bootstrapping nodes. - items: - type: string - type: array users: description: Users specifies extra users to add. 
items: diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml index 433fee76d0..20d4450cc8 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_nodeadmconfigtemplates.yaml @@ -51,6 +51,12 @@ spec: spec: description: NodeadmConfigSpec defines the desired state of NodeadmConfig. properties: + PreNodeadmCommands: + description: PreNodeadmCommands specifies extra commands to + run before bootstrapping nodes. + items: + type: string + type: array containerd: description: Containerd contains options for containerd. properties: @@ -246,12 +252,6 @@ spec: type: string type: array type: object - preBootstrapCommands: - description: PreBootstrapCommands specifies extra commands - to run before bootstrapping nodes. - items: - type: string - type: array users: description: Users specifies extra users to add. items: diff --git a/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go b/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go index 973e3f325d..7abbe6ecb3 100644 --- a/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go +++ b/test/e2e/suites/managed/eks_upgrade_to_nodeadm_test.go @@ -135,7 +135,7 @@ var _ = ginkgo.Describe("EKS Cluster upgrade test", func() { Spec: eksbootstrapv1.NodeadmConfigTemplateSpec{ Template: eksbootstrapv1.NodeadmConfigTemplateResource{ Spec: eksbootstrapv1.NodeadmConfigSpec{ - PreBootstrapCommands: []string{ + PreNodeadmCommands: []string{ "echo \"hello world\"", }, },