From 7cae8f44d7b6ccc0127d4697e9f374bb1bf9f4c4 Mon Sep 17 00:00:00 2001 From: RainbowMango Date: Thu, 11 Sep 2025 15:32:51 +0800 Subject: [PATCH] Sync APIs from karmada repo based on v1.15.0 Signed-off-by: RainbowMango --- cluster/types.go | 2 +- cluster/v1alpha1/cluster_helper.go | 45 ++++ cluster/v1alpha1/cluster_helper_test.go | 194 ++++++++++++++++++ cluster/v1alpha1/types.go | 2 +- config/v1alpha1/interpretercontext_types.go | 10 + .../resourceinterpretercustomization_types.go | 56 +++++ .../resourceinterpreterwebhook_types.go | 8 + config/v1alpha1/zz_generated.deepcopy.go | 28 +++ go.mod | 14 +- go.sum | 43 ++-- operator/v1alpha1/register.go | 2 +- operator/v1alpha1/type.go | 17 ++ operator/v1alpha1/zz_generated.deepcopy.go | 23 ++- policy/v1alpha1/clustertaint_types.go | 11 + policy/v1alpha1/propagation_types.go | 72 ++++++- policy/v1alpha1/zz_generated.deepcopy.go | 26 +++ work/v1alpha2/binding_types.go | 53 ++++- work/v1alpha2/zz_generated.deepcopy.go | 56 +++++ 18 files changed, 621 insertions(+), 41 deletions(-) diff --git a/cluster/types.go b/cluster/types.go index 189eca7..7867707 100644 --- a/cluster/types.go +++ b/cluster/types.go @@ -60,7 +60,7 @@ type ClusterSpec struct { // - compose the DNS name of multi-cluster services. // // +optional - // +kubebuilder:validation:Maxlength=128000 + // +kubebuilder:validation:MaxLength=128000 ID string `json:"id,omitempty"` // SyncMode describes how a cluster syncs resources from karmada control plane. diff --git a/cluster/v1alpha1/cluster_helper.go b/cluster/v1alpha1/cluster_helper.go index 6e30e4c..f4996a6 100644 --- a/cluster/v1alpha1/cluster_helper.go +++ b/cluster/v1alpha1/cluster_helper.go @@ -16,7 +16,52 @@ limitations under the License. package v1alpha1 +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + // String returns a well-formatted string for the Cluster object. func (c *Cluster) String() string { return c.Name } + +// APIEnablementStatus is the status of the specific API on the cluster. +type APIEnablementStatus string + +const ( + // APIEnabled means the cluster supports the specified API. + APIEnabled APIEnablementStatus = "APIEnabled" + // APIDisabled means the cluster does not support the specified API. + APIDisabled APIEnablementStatus = "APIDisabled" + // APIUnknown means it is unknown whether the cluster supports the specified API. + APIUnknown APIEnablementStatus = "APIUnknown" +) + +// APIEnablement checks if the target API (or CRD) referenced by gvk has been installed in the cluster. +// The check takes the CompleteAPIEnablements condition into account. If the CompleteAPIEnablements condition indicates +// the current APIEnablements is Partial, it returns APIEnabled if the gvk is found in the list; otherwise, the status is considered APIUnknown. +// This means that when the APIEnablements is Partial and the gvk is not present, we cannot definitively say the API is disabled. +func (c *Cluster) APIEnablement(gvk schema.GroupVersionKind) APIEnablementStatus { + targetGroupVersion := gvk.GroupVersion().String() + for _, apiEnablement := range c.Status.APIEnablements { + if apiEnablement.GroupVersion != targetGroupVersion { + continue + } + for _, resource := range apiEnablement.Resources { + if resource.Kind != gvk.Kind { + continue + } + return APIEnabled + } + } + + // If we have the complete APIEnablements list for the cluster, + // we can confidently determine that the API is disabled if it was not found above. 
+ if meta.IsStatusConditionPresentAndEqual(c.Status.Conditions, ClusterConditionCompleteAPIEnablements, metav1.ConditionTrue) { + return APIDisabled + } + + return APIUnknown +} diff --git a/cluster/v1alpha1/cluster_helper_test.go b/cluster/v1alpha1/cluster_helper_test.go index ba94f0a..1f3766c 100644 --- a/cluster/v1alpha1/cluster_helper_test.go +++ b/cluster/v1alpha1/cluster_helper_test.go @@ -21,6 +21,7 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" ) func TestString(t *testing.T) { @@ -64,3 +65,196 @@ func TestString(t *testing.T) { }) } } + +func TestAPIEnablement(t *testing.T) { + tests := []struct { + name string + cluster *Cluster + gvk schema.GroupVersionKind + expected APIEnablementStatus + }{ + { + name: "API enabled - exact match found", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []APIResource{ + {Name: "deployments", Kind: "Deployment"}, + {Name: "replicasets", Kind: "ReplicaSet"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, + expected: APIEnabled, + }, + { + name: "API disabled - not found in complete list", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []APIResource{ + {Name: "deployments", Kind: "Deployment"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, + expected: APIDisabled, + }, + { + name: "API unknown - not found in partial list", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []APIResource{ + {Name: "deployments", Kind: "Deployment"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, + expected: APIUnknown, + }, + { + name: "API unknown - no CompleteAPIEnablements condition", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []APIResource{ + {Name: "deployments", Kind: "Deployment"}, + }, + }, + }, + Conditions: []metav1.Condition{}, + }, + }, + gvk: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, + expected: APIUnknown, + }, + { + name: "API enabled - found in core group", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "v1", + Resources: []APIResource{ + {Name: "pods", Kind: "Pod"}, + {Name: "services", Kind: "Service"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, + expected: APIEnabled, + }, + { + name: "API disabled - wrong kind in same group version", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []APIResource{ + {Name: "deployments", Kind: "Deployment"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: 
ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, + expected: APIDisabled, + }, + { + name: "API disabled - empty APIEnablements with complete condition", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{}, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, + expected: APIDisabled, + }, + { + name: "API enabled - custom resource found", + cluster: &Cluster{ + Status: ClusterStatus{ + APIEnablements: []APIEnablement{ + { + GroupVersion: "example.com/v1alpha1", + Resources: []APIResource{ + {Name: "customresources", Kind: "CustomResource"}, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + gvk: schema.GroupVersionKind{Group: "example.com", Version: "v1alpha1", Kind: "CustomResource"}, + expected: APIEnabled, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.cluster.APIEnablement(tt.gvk) + if result != tt.expected { + t.Errorf("APIEnablement() = %v, want %v", result, tt.expected) + } + }) + } +} diff --git a/cluster/v1alpha1/types.go b/cluster/v1alpha1/types.go index 8a06b94..9332c73 100644 --- a/cluster/v1alpha1/types.go +++ b/cluster/v1alpha1/types.go @@ -71,7 +71,7 @@ type ClusterSpec struct { // - compose the DNS name of multi-cluster services. // // +optional - // +kubebuilder:validation:Maxlength=128000 + // +kubebuilder:validation:MaxLength=128000 ID string `json:"id,omitempty"` // SyncMode describes how a cluster syncs resources from karmada control plane. diff --git a/config/v1alpha1/interpretercontext_types.go b/config/v1alpha1/interpretercontext_types.go index 64ddcf8..5e6fa7d 100644 --- a/config/v1alpha1/interpretercontext_types.go +++ b/config/v1alpha1/interpretercontext_types.go @@ -118,6 +118,16 @@ type ResourceInterpreterResponse struct { // +optional Replicas *int32 `json:"replicas,omitempty"` + // Components represents the requirements of multiple pod templates of the referencing resource. + // It is designed to support workloads that consist of multiple pod templates, + // such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment), + // where each workload is composed of more than one pod template. It is also capable of representing + // single-component workloads, such as Deployment. + // + // Required if InterpreterOperation is InterpreterOperationInterpretComponent. + // +optional + Components []workv1alpha2.Component `json:"components,omitempty"` + // Dependencies represents the reference of dependencies object. // Required if InterpreterOperation is InterpreterOperationInterpretDependency. 
// +optional diff --git a/config/v1alpha1/resourceinterpretercustomization_types.go b/config/v1alpha1/resourceinterpretercustomization_types.go index 4da59ae..2752c71 100644 --- a/config/v1alpha1/resourceinterpretercustomization_types.go +++ b/config/v1alpha1/resourceinterpretercustomization_types.go @@ -97,6 +97,17 @@ type CustomizationRules struct { // +optional ReplicaResource *ReplicaResourceRequirement `json:"replicaResource,omitempty"` + // ComponentResource describes the rules for Karmada to discover the resource requirements + // for multiple components from the given object. + // This is designed for CRDs with multiple components (e.g., FlinkDeployment), but + // can also be used for single-component resources like Deployment. + // If implemented, the controller will use this to obtain per-component replica and resource + // requirements, and will not call ReplicaResource. + // If not implemented, the controller will fall back to ReplicaResource for backward compatibility. + // This will only be used when the feature gate 'MultiplePodTemplatesScheduling' is enabled. + // +optional + ComponentResource *ComponentResourceRequirement `json:"componentResource,omitempty"` + // ReplicaRevision describes the rules for Karmada to revise the resource's replica. // It would be useful for those CRD resources that declare workload types like // Deployment. @@ -203,6 +214,51 @@ type ReplicaResourceRequirement struct { LuaScript string `json:"luaScript"` } +// ComponentResourceRequirement holds the scripts for extracting the desired replica count +// and resource requirements for each component within a resource. This is particularly useful for +// resources that define multiple components (such as CRDs with multiple pod templates), but can also +// be used for single-component resources. +type ComponentResourceRequirement struct { + // LuaScript holds the Lua script that is used to extract the desired replica count and resource + // requirements for each component of the resource. + // + // The script should implement a function as follows: + // + // ``` + // luaScript: > + // function GetComponents(desiredObj) + // local components = {} + // + // local jobManagerComponent = { + // name = "jobmanager", + // replicas = desiredObj.spec.jobManager.replicas + // } + // table.insert(components, jobManagerComponent) + // + // local taskManagerComponent = { + // name = "taskmanager", + // replicas = desiredObj.spec.taskManager.replicas + // } + // table.insert(components, taskManagerComponent) + // + // return components + // end + // ``` + // + // The content of the LuaScript needs to be a whole function including both + // declaration and implementation. + // + // The parameters will be supplied by the system: + // - desiredObj: the object represents the configuration to be applied + // to the member cluster. + // + // The function expects one return value: + // - components: the resource requirements for each component. + // The returned value will be set into a ResourceBinding or ClusterResourceBinding. + // +required + LuaScript string `json:"luaScript"` +} + // ReplicaRevision holds the scripts for revising the desired replicas. type ReplicaRevision struct { // LuaScript holds the Lua script that is used to revise replicas in the desired specification. 
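
For reference, a minimal sketch — not part of the patch — of the Go-side Component values that a GetComponents Lua script like the one documented above is expected to map to. The component names, replica counts, and resource figures below are illustrative assumptions for a two-component workload such as a FlinkDeployment.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	workv1alpha2 "github.com/karmada-io/api/work/v1alpha2"
)

func main() {
	// Components as an interpreter would report them for a two-component
	// workload (names, replica counts and resources are assumed values).
	components := []workv1alpha2.Component{
		{
			Name:     "jobmanager",
			Replicas: 1,
			ReplicaRequirements: &workv1alpha2.ComponentReplicaRequirements{
				ResourceRequest: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("1"),
					corev1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
		},
		{
			Name:     "taskmanager",
			Replicas: 2,
			ReplicaRequirements: &workv1alpha2.ComponentReplicaRequirements{
				ResourceRequest: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("2"),
					corev1.ResourceMemory: resource.MustParse("4Gi"),
				},
			},
		},
	}
	fmt.Println(len(components), "components extracted")
}
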
diff --git a/config/v1alpha1/resourceinterpreterwebhook_types.go b/config/v1alpha1/resourceinterpreterwebhook_types.go index eae94dd..dad865a 100644 --- a/config/v1alpha1/resourceinterpreterwebhook_types.go +++ b/config/v1alpha1/resourceinterpreterwebhook_types.go @@ -124,6 +124,14 @@ const ( // Only necessary for those resource types that have replica declaration, like Deployment or similar custom resources. InterpreterOperationInterpretReplica InterpreterOperation = "InterpretReplica" + // InterpreterOperationInterpretComponent indicates that karmada wants to figure out + // resource requirements for multiple components from a given object. + // This operation is designed for CRDs with multiple components (e.g., FlinkDeployment), + // but can also be used for single-component resources. + // If an interpreter supports this operation, 'InterpretReplica' will not be called. + // This operation is only used when the feature gate 'MultiplePodTemplatesScheduling' is enabled. + InterpreterOperationInterpretComponent InterpreterOperation = "InterpretComponent" + // InterpreterOperationReviseReplica indicates that karmada request webhook to modify the replica. InterpreterOperationReviseReplica InterpreterOperation = "ReviseReplica" diff --git a/config/v1alpha1/zz_generated.deepcopy.go b/config/v1alpha1/zz_generated.deepcopy.go index d51cf13..b10c654 100644 --- a/config/v1alpha1/zz_generated.deepcopy.go +++ b/config/v1alpha1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentResourceRequirement) DeepCopyInto(out *ComponentResourceRequirement) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResourceRequirement. +func (in *ComponentResourceRequirement) DeepCopy() *ComponentResourceRequirement { + if in == nil { + return nil + } + out := new(ComponentResourceRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomizationRules) DeepCopyInto(out *CustomizationRules) { *out = *in @@ -40,6 +56,11 @@ func (in *CustomizationRules) DeepCopyInto(out *CustomizationRules) { *out = new(ReplicaResourceRequirement) **out = **in } + if in.ComponentResource != nil { + in, out := &in.ComponentResource, &out.ComponentResource + *out = new(ComponentResourceRequirement) + **out = **in + } if in.ReplicaRevision != nil { in, out := &in.ReplicaRevision, &out.ReplicaRevision *out = new(ReplicaRevision) @@ -387,6 +408,13 @@ func (in *ResourceInterpreterResponse) DeepCopyInto(out *ResourceInterpreterResp *out = new(int32) **out = **in } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]v1alpha2.Component, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Dependencies != nil { in, out := &in.Dependencies, &out.Dependencies *out = make([]DependentObjectReference, len(*in)) diff --git a/go.mod b/go.mod index 41af652..999522e 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,19 @@ module github.com/karmada-io/api -go 1.23.8 +go 1.24.6 require ( - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 + k8s.io/api v0.33.2 + k8s.io/apiextensions-apiserver v0.33.2 + k8s.io/apimachinery v0.33.2 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 ) require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -25,6 +24,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 741f558..5a4b902 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -10,11 +9,9 @@ github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -28,17 +25,16 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -79,21 +75,24 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 
h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= +k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= +k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= +k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= +k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= +k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/operator/v1alpha1/register.go b/operator/v1alpha1/register.go index d09d8aa..43e412e 100644 --- a/operator/v1alpha1/register.go +++ b/operator/v1alpha1/register.go @@ -50,7 +50,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder.Register(addKnownTypes) } // Adds the list of known types to Scheme. diff --git a/operator/v1alpha1/type.go b/operator/v1alpha1/type.go index 931ab1a..52b4a1b 100644 --- a/operator/v1alpha1/type.go +++ b/operator/v1alpha1/type.go @@ -58,7 +58,24 @@ const ( // HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol. type HTTPSource struct { // URL specifies the URL of the CRD tarball resource. + // +required URL string `json:"url,omitempty"` + + // Proxy specifies the configuration of a proxy server to use when downloading the CRD tarball. 
+ // When set, the operator will use the configuration to determine how to establish a connection to the proxy to fetch the tarball from the URL specified above. + // This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server. + // If a proxy configuration is not set, the operator will attempt to download the tarball directly from the URL specified above without using a proxy. + // +optional + Proxy *ProxyConfig `json:"proxy,omitempty"` +} + +// ProxyConfig defines the configuration for a proxy server to use when downloading a CRD tarball. +type ProxyConfig struct { + // ProxyURL specifies the HTTP/HTTPS proxy server URL to use when downloading the CRD tarball. + // This is useful in environments where direct access to the server hosting the CRD tarball is restricted and a proxy must be used to reach that server. + // The format should be a valid URL, e.g., "http://proxy.example.com:8080". + // +required + ProxyURL string `json:"proxyURL"` } // CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use. diff --git a/operator/v1alpha1/zz_generated.deepcopy.go b/operator/v1alpha1/zz_generated.deepcopy.go index 69c643e..ca15fc1 100644 --- a/operator/v1alpha1/zz_generated.deepcopy.go +++ b/operator/v1alpha1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *CRDTarball) DeepCopyInto(out *CRDTarball) { if in.HTTPSource != nil { in, out := &in.HTTPSource, &out.HTTPSource *out = new(HTTPSource) - **out = **in + (*in).DeepCopyInto(*out) } if in.CRDDownloadPolicy != nil { in, out := &in.CRDDownloadPolicy, &out.CRDDownloadPolicy @@ -198,6 +198,11 @@ func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPSource) DeepCopyInto(out *HTTPSource) { *out = *in + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(ProxyConfig) + **out = **in + } return } @@ -860,6 +865,22 @@ func (in *Networking) DeepCopy() *Networking { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeData) DeepCopyInto(out *VolumeData) { *out = *in diff --git a/policy/v1alpha1/clustertaint_types.go b/policy/v1alpha1/clustertaint_types.go index d2faf18..556f888 100644 --- a/policy/v1alpha1/clustertaint_types.go +++ b/policy/v1alpha1/clustertaint_types.go @@ -21,6 +21,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // ResourceKindClusterTaintPolicy is kind name of ClusterTaintPolicy. + ResourceKindClusterTaintPolicy = "ClusterTaintPolicy" + // ResourceSingularClusterTaintPolicy is singular name of ClusterTaintPolicy. + ResourceSingularClusterTaintPolicy = "clustertaintpolicy" + // ResourcePluralClusterTaintPolicy is plural name of ClusterTaintPolicy. 
+ ResourcePluralClusterTaintPolicy = "clustertaintpolicies" + // ResourceNamespaceScopedClusterTaintPolicy indicates if ClusterTaintPolicy is NamespaceScoped. + ResourceNamespaceScopedClusterTaintPolicy = false +) + // +genclient // +genclient:nonNamespaced // +kubebuilder:resource:path=clustertaintpolicies,scope="Cluster" diff --git a/policy/v1alpha1/propagation_types.go b/policy/v1alpha1/propagation_types.go index f0bbef4..30b55da 100644 --- a/policy/v1alpha1/propagation_types.go +++ b/policy/v1alpha1/propagation_types.go @@ -289,14 +289,40 @@ const ( // at the same time. During a failover, it is crucial to ensure that the old // application is removed before creating a new one to avoid duplicate // processing and maintaining state consistency. + // + // Deprecated: The term `Immediately` may be confusing when used alongside + // `GracePeriodSeconds`, which specifies that resources are removed after + // a grace period rather than at once. + // `Immediately` is replaced by `Directly` for clarity. This term remains + // functional in the current API version for backward compatibility and will + // be removed when PropagationPolicy advances to alpha2 or beta. Immediately PurgeMode = "Immediately" // Graciously represents that Karmada will wait for the application to // come back to healthy on the new cluster or after a timeout is reached // before evicting the application. + // + // Deprecated: The term `Graciously` is replaced by `Gracefully` for correct + // English usage. This term remains functional in the current API version for + // backward compatibility and will be removed when PropagationPolicy advances + // to alpha2 or beta. Graciously PurgeMode = "Graciously" // Never represents that Karmada will not evict the application and // users manually confirms how to clean up redundant copies. Never PurgeMode = "Never" + + // PurgeModeDirectly represents that Karmada will directly evict the legacy + // application. This is useful in scenarios where an application can not + // tolerate two instances running simultaneously. + // For example, the Flink application supports exactly-once state consistency, + // which means it requires that no two instances of the application are running + // at the same time. During a failover, it is crucial to ensure that the old + // application is removed before creating a new one to avoid duplicate + // processing and maintaining state consistency. + PurgeModeDirectly PurgeMode = "Directly" + // PurgeModeGracefully represents that Karmada will wait for the application to + // come back to healthy on the new cluster or after a timeout is reached + // before evicting the application. + PurgeModeGracefully PurgeMode = "Gracefully" ) // FailoverBehavior indicates failover behaviors in case of an application or @@ -310,9 +336,13 @@ type FailoverBehavior struct { Application *ApplicationFailoverBehavior `json:"application,omitempty"` // Cluster indicates failover behaviors in case of cluster failure. - // If this value is nil, failover is disabled. + // If this value is nil, the failover behavior in case of cluster failure + // will be controlled by the controller's no-execute-taint-eviction-purge-mode + // parameter. + // If set, the failover behavior in case of cluster failure will be defined + // by this value. // +optional - // Cluster *ClusterFailoverBehavior `json:"cluster,omitempty"` + Cluster *ClusterFailoverBehavior `json:"cluster,omitempty"` } // ApplicationFailoverBehavior indicates application failover behaviors. 
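
The deprecated spellings remain functional alongside the new ones, so code that still receives `Immediately` or `Graciously` can normalize them before comparing. A sketch of one such mapping follows; `normalizePurgeMode` is a hypothetical helper, not part of this patch.

package main

import (
	"fmt"

	policyv1alpha1 "github.com/karmada-io/api/policy/v1alpha1"
)

// normalizePurgeMode is a hypothetical helper that maps the deprecated
// PurgeMode spellings onto their replacements; other values pass through.
func normalizePurgeMode(m policyv1alpha1.PurgeMode) policyv1alpha1.PurgeMode {
	switch m {
	case policyv1alpha1.Immediately:
		return policyv1alpha1.PurgeModeDirectly
	case policyv1alpha1.Graciously:
		return policyv1alpha1.PurgeModeGracefully
	default:
		return m
	}
}

func main() {
	fmt.Println(normalizePurgeMode(policyv1alpha1.Graciously)) // Gracefully
}
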
@@ -326,10 +356,11 @@ type ApplicationFailoverBehavior struct { // PurgeMode represents how to deal with the legacy applications on the // cluster from which the application is migrated. - // Valid options are "Immediately", "Graciously" and "Never". - // Defaults to "Graciously". - // +kubebuilder:validation:Enum=Immediately;Graciously;Never - // +kubebuilder:default=Graciously + // Valid options are "Directly", "Gracefully", "Never", "Immediately"(deprecated), + // and "Graciously"(deprecated). + // Defaults to "Gracefully". + // +kubebuilder:validation:Enum=Directly;Gracefully;Never;Immediately;Graciously + // +kubebuilder:default=Gracefully // +optional PurgeMode PurgeMode `json:"purgeMode,omitempty"` @@ -360,6 +391,35 @@ type ApplicationFailoverBehavior struct { StatePreservation *StatePreservation `json:"statePreservation,omitempty"` } +// ClusterFailoverBehavior indicates cluster failover behaviors. +type ClusterFailoverBehavior struct { + // PurgeMode represents how to deal with the legacy applications on the + // cluster from which the application is migrated. + // Valid options are "Directly", "Gracefully". + // Defaults to "Gracefully". + // +kubebuilder:validation:Enum=Directly;Gracefully + // +kubebuilder:default=Gracefully + // +optional + PurgeMode PurgeMode `json:"purgeMode,omitempty"` + + // StatePreservation defines the policy for preserving and restoring state data + // during failover events for stateful applications. + // + // When an application fails over from one cluster to another, this policy enables + // the extraction of critical data from the original resource configuration. + // Upon successful migration, the extracted data is then re-injected into the new + // resource, ensuring that the application can resume operation with its previous + // state intact. + // This is particularly useful for stateful applications where maintaining data + // consistency across failover events is crucial. + // If not specified, means no state data will be preserved. + // + // Note: This requires the StatefulFailoverInjection feature gate to be enabled, + // which is alpha. + // +optional + StatePreservation *StatePreservation `json:"statePreservation,omitempty"` +} + // DecisionConditions represents the decision conditions of performing the failover process. type DecisionConditions struct { // TolerationSeconds represents the period of time Karmada should wait diff --git a/policy/v1alpha1/zz_generated.deepcopy.go b/policy/v1alpha1/zz_generated.deepcopy.go index 2688536..fca575a 100644 --- a/policy/v1alpha1/zz_generated.deepcopy.go +++ b/policy/v1alpha1/zz_generated.deepcopy.go @@ -107,6 +107,27 @@ func (in *ClusterAffinityTerm) DeepCopy() *ClusterAffinityTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterFailoverBehavior) DeepCopyInto(out *ClusterFailoverBehavior) { + *out = *in + if in.StatePreservation != nil { + in, out := &in.StatePreservation, &out.StatePreservation + *out = new(StatePreservation) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFailoverBehavior. +func (in *ClusterFailoverBehavior) DeepCopy() *ClusterFailoverBehavior { + if in == nil { + return nil + } + out := new(ClusterFailoverBehavior) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterOverridePolicy) DeepCopyInto(out *ClusterOverridePolicy) { *out = *in @@ -417,6 +438,11 @@ func (in *FailoverBehavior) DeepCopyInto(out *FailoverBehavior) { *out = new(ApplicationFailoverBehavior) (*in).DeepCopyInto(*out) } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(ClusterFailoverBehavior) + (*in).DeepCopyInto(*out) + } return } diff --git a/work/v1alpha2/binding_types.go b/work/v1alpha2/binding_types.go index 81cc8da..c9670df 100644 --- a/work/v1alpha2/binding_types.go +++ b/work/v1alpha2/binding_types.go @@ -86,6 +86,17 @@ type ResourceBindingSpec struct { // +optional Replicas int32 `json:"replicas,omitempty"` + // Components represents the requirements of multiple pod templates of the referencing resource. + // It is designed to support workloads that consist of multiple pod templates, + // such as distributed training jobs (e.g., PyTorch, TensorFlow) and big data workloads (e.g., FlinkDeployment), + // where each workload is composed of more than one pod template. It is also capable of representing + // single-component workloads, such as Deployment. + // + // Note: This field is intended to replace the legacy ReplicaRequirements and Replicas fields above. + // It is only populated when the MultiplePodTemplatesScheduling feature gate is enabled. + // +optional + Components []Component `json:"components,omitempty"` + // Clusters represents target member clusters where the resource to be deployed. // +optional Clusters []TargetCluster `json:"clusters,omitempty"` @@ -212,6 +223,39 @@ type ReplicaRequirements struct { PriorityClassName string `json:"priorityClassName,omitempty"` } +// Component represents the requirements for a specific component. +type Component struct { + // Name of this component. + // It is required when the resource contains multiple components to ensure proper identification, + // and must also be unique within the same resource. + // +kubebuilder:validation:MaxLength=32 + // +required + Name string `json:"name"` + + // Replicas represents the replica number of the resource's component. + // +required + Replicas int32 `json:"replicas"` + + // ReplicaRequirements represents the requirements required by each replica for this component. + // +optional + ReplicaRequirements *ComponentReplicaRequirements `json:"replicaRequirements,omitempty"` +} + +// ComponentReplicaRequirements represents the requirements required by each replica. +type ComponentReplicaRequirements struct { + // NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica. + // +optional + NodeClaim *NodeClaim `json:"nodeClaim,omitempty"` + + // ResourceRequest represents the resources required by each replica. + // +optional + ResourceRequest corev1.ResourceList `json:"resourceRequest,omitempty"` + + // PriorityClassName represents the resources priorityClassName + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` +} + // NodeClaim represents the node claim HardNodeAffinity, NodeSelector and Tolerations required by each replica. type NodeClaim struct { // A node selector represents the union of the results of one or more label queries over a set of @@ -246,8 +290,8 @@ type GracefulEvictionTask struct { // PurgeMode represents how to deal with the legacy applications on the // cluster from which the application is migrated. - // Valid options are "Immediately", "Graciously" and "Never". 
- // +kubebuilder:validation:Enum=Immediately;Graciously;Never + // Valid options are "Immediately", "Directly", "Graciously", "Gracefully" and "Never". + // +kubebuilder:validation:Enum=Immediately;Directly;Graciously;Gracefully;Never // +optional PurgeMode policyv1alpha1.PurgeMode `json:"purgeMode,omitempty"` @@ -432,6 +476,11 @@ const ( // BindingReasonUnschedulable reason in Scheduled condition means that the scheduler can't schedule // the binding right now, for example due to insufficient resources in the clusters. BindingReasonUnschedulable = "Unschedulable" + + // BindingReasonQuotaExceeded reason in Scheduled condition means that the scheduler can't schedule + // the binding because the resource requirement exceeds one or more of the FederatedResourceQuotas + // defined in the namespace. + BindingReasonQuotaExceeded = "QuotaExceeded" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/work/v1alpha2/zz_generated.deepcopy.go b/work/v1alpha2/zz_generated.deepcopy.go index 45932cc..72c19e3 100644 --- a/work/v1alpha2/zz_generated.deepcopy.go +++ b/work/v1alpha2/zz_generated.deepcopy.go @@ -131,6 +131,55 @@ func (in *ClusterResourceBindingList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Component) DeepCopyInto(out *Component) { + *out = *in + if in.ReplicaRequirements != nil { + in, out := &in.ReplicaRequirements, &out.ReplicaRequirements + *out = new(ComponentReplicaRequirements) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component. +func (in *Component) DeepCopy() *Component { + if in == nil { + return nil + } + out := new(Component) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentReplicaRequirements) DeepCopyInto(out *ComponentReplicaRequirements) { + *out = *in + if in.NodeClaim != nil { + in, out := &in.NodeClaim, &out.NodeClaim + *out = new(NodeClaim) + (*in).DeepCopyInto(*out) + } + if in.ResourceRequest != nil { + in, out := &in.ResourceRequest, &out.ResourceRequest + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentReplicaRequirements. +func (in *ComponentReplicaRequirements) DeepCopy() *ComponentReplicaRequirements { + if in == nil { + return nil + } + out := new(ComponentReplicaRequirements) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GracefulEvictionTask) DeepCopyInto(out *GracefulEvictionTask) { *out = *in @@ -327,6 +376,13 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) { *out = new(ReplicaRequirements) (*in).DeepCopyInto(*out) } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]Component, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Clusters != nil { in, out := &in.Clusters, &out.Clusters *out = make([]TargetCluster, len(*in))
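
For context on the new cluster helper, a minimal sketch — not part of the patch — of how a caller might branch on Cluster.APIEnablement before propagating a resource. The behaviour in the APIUnknown branch is an assumed policy, not something this patch prescribes.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"

	clusterv1alpha1 "github.com/karmada-io/api/cluster/v1alpha1"
)

// shouldPropagate is a hypothetical caller-side check built on the new
// Cluster.APIEnablement helper introduced in this patch.
func shouldPropagate(c *clusterv1alpha1.Cluster, gvk schema.GroupVersionKind) bool {
	switch c.APIEnablement(gvk) {
	case clusterv1alpha1.APIEnabled:
		return true
	case clusterv1alpha1.APIDisabled:
		return false
	default:
		// APIUnknown: the reported APIEnablements list is partial, so the API
		// may still exist; here we optimistically propagate (assumed policy —
		// a real controller might instead fall back to live discovery).
		return true
	}
}

func main() {
	c := &clusterv1alpha1.Cluster{
		Status: clusterv1alpha1.ClusterStatus{
			APIEnablements: []clusterv1alpha1.APIEnablement{
				{GroupVersion: "apps/v1", Resources: []clusterv1alpha1.APIResource{{Name: "deployments", Kind: "Deployment"}}},
			},
		},
	}
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	fmt.Println(shouldPropagate(c, gvk)) // true
}
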