diff --git a/cmd/clustertree/cluster-manager/app/manager.go b/cmd/clustertree/cluster-manager/app/manager.go index 78ddea2ed..1fc699619 100644 --- a/cmd/clustertree/cluster-manager/app/manager.go +++ b/cmd/clustertree/cluster-manager/app/manager.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -22,6 +23,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/mcs" podcontrollers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pv" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pvc" nodeserver "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/node-server" @@ -161,6 +163,12 @@ func run(ctx context.Context, opts *options.Options) error { return err } + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + klog.Errorf("Unable to create discoveryClient: %v", err) + return err + } + // add cluster controller clusterController := clusterManager.ClusterController{ Root: mgr.GetClient(), @@ -262,6 +270,18 @@ func run(ctx context.Context, opts *options.Options) error { } } + promotePolicyController := promote.PromotePolicyController{ + RootClient: mgr.GetClient(), + RootClientSet: rootClient, + RootDynamicClient: dynamicClient, + RootDiscoveryClient: discoveryClient, + GlobalLeafManager: globalleafManager, + PromotePolicyOptions: opts.PromotePolicyOptions, + } + if err = promotePolicyController.SetupWithManager(mgr); err != nil { + return fmt.Errorf("error starting %s: %v", promote.PromotePolicyControllerName, err) + } + // init commonController for i, gvr := range controllers.SYNC_GVRS { commonController := controllers.SyncResourcesReconciler{ diff --git a/cmd/clustertree/cluster-manager/app/options/options.go b/cmd/clustertree/cluster-manager/app/options/options.go index a2b031f71..27a1ceb9a 100644 --- a/cmd/clustertree/cluster-manager/app/options/options.go +++ b/cmd/clustertree/cluster-manager/app/options/options.go @@ -50,6 +50,8 @@ type Options struct { BackoffOpts flags.BackoffOptions SyncPeriod time.Duration + + PromotePolicyOptions PromotePolicyOptions } type KubernetesOptions struct { @@ -59,6 +61,11 @@ type KubernetesOptions struct { Burst int `json:"burst,omitempty" yaml:"burst,omitempty"` } +type PromotePolicyOptions struct { + // ExcludeNamespaces are the ns name excluded by default when you need to sync leaf cluster resources + ForbidNamespaces []string +} + func NewOptions() (*Options, error) { var leaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&leaderElection) @@ -93,6 +100,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { flags.StringSliceVar(&o.AutoCreateMCSPrefix, "auto-mcs-prefix", []string{}, "The prefix of namespace for service to auto create mcs resources") flags.StringSliceVar(&o.ReservedNamespaces, "reserved-namespaces", []string{"kube-system"}, "The namespaces protected by Kosmos that the controller-manager will skip.") flags.DurationVar(&o.SyncPeriod, "sync-period", 0, "the sync period for informer to resync.") + 
flags.StringSliceVar(&o.PromotePolicyOptions.ForbidNamespaces, "forbid-promote-namespace", []string{}, "This is forbidden to promote namespace") o.RateLimiterOpts.AddFlags(flags) o.BackoffOpts.AddFlags(flags) options.BindLeaderElectionFlags(&o.LeaderElection, flags) diff --git a/deploy/crds/kosmos.io_promotepolicies.yaml b/deploy/crds/kosmos.io_promotepolicies.yaml new file mode 100644 index 000000000..3b1d76e40 --- /dev/null +++ b/deploy/crds/kosmos.io_promotepolicies.yaml @@ -0,0 +1,131 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.0 + creationTimestamp: null + name: promotepolicies.kosmos.io +spec: + group: kosmos.io + names: + kind: PromotePolicy + listKind: PromotePolicyList + plural: promotepolicies + singular: promotepolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: PromotePolicy is custom resource that represents the capture + of sync leaf cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PromotePolicySpec defines the desired state of promotePolicy + properties: + clusterName: + description: Cluster is a cluster that needs to be migrated + type: string + excludedNamespaceScopedResources: + description: ExcludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to exclude from the backup. If set to "*", all + namespace-scoped resource types are excluded. The default value + is empty. + items: + type: string + nullable: true + type: array + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that + are not included in the backup. + items: + type: string + nullable: true + type: array + includedNamespaceScopedResources: + description: IncludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to include in the backup. The default value + is "*". + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include + objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + rollback: + description: Rollback set true, then rollback from the backup + nullable: true + type: string + type: object + status: + description: PromotePolicyStatus defines the observed state of promotePolicy + properties: + backedupFile: + type: string + completionTimestamp: + description: CompletionTimestamp records the time a sync was completed. + Completion time is recorded even on failed sync. The server's time + is used for CompletionTimestamps + format: date-time + nullable: true + type: string + failureReason: + description: FailureReason is an error that caused the entire sync + to fail. 
+ type: string + phase: + description: Phase is the current state of the Backup. + type: string + precheckErrors: + description: PrecheckErrors is a slice of all precheck errors (if + applicable). + items: + type: string + nullable: true + type: array + progress: + description: Progress contains information about the sync's execution + progress. Note that this information is best-effort only -- if fails + to update it for any reason, it may be inaccurate/stale. + nullable: true + properties: + itemsBackedUp: + description: ItemsBackedUp is the number of items that have actually + been written to the backup tarball so far. + type: integer + totalItems: + description: TotalItems is the total number of items to be backed + up. This number may change throughout the execution of the backup + due to plugins that return additional related items to back + up, the velero.io/exclude-from-backup label, and various other + filters that happen as items are processed. + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a sync was started. The + server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + served: true + storage: true diff --git a/examples/promote_policy_demo.yaml b/examples/promote_policy_demo.yaml new file mode 100644 index 000000000..e87d6d4e5 --- /dev/null +++ b/examples/promote_policy_demo.yaml @@ -0,0 +1,21 @@ +apiVersion: kosmos.io/v1alpha1 +kind: PromotePolicy +metadata: + name: promote-pilicy-sample +spec: + includedNamespaces: + - test + - kube-system + excludedNamespaces: + - kube-system + includedNamespaceScopedResources: + - pods + - daemonsets.apps +# - serviceexports.multicluster.x-k8s.io + - nodeconfigs.kosmos.io + excludedNamespaceScopedResources: +# - pods +# - nodeconfigs.kosmos.io + clusterName: + cluster7 + diff --git a/go.mod b/go.mod index cadaa3ff4..c153fe488 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,6 @@ require ( k8s.io/kube-scheduler v0.0.0 k8s.io/kubectl v0.26.3 k8s.io/kubernetes v1.13.0 - k8s.io/metrics v0.26.3 k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 sigs.k8s.io/controller-runtime v0.14.5 sigs.k8s.io/mcs-api v0.1.0 diff --git a/pkg/apis/kosmos/v1alpha1/promotepolicy_types.go b/pkg/apis/kosmos/v1alpha1/promotepolicy_types.go new file mode 100644 index 000000000..8ddaee5a2 --- /dev/null +++ b/pkg/apis/kosmos/v1alpha1/promotepolicy_types.go @@ -0,0 +1,166 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PromotePolicySpec defines the desired state of promotePolicy +type PromotePolicySpec struct { + // Cluster is a cluster that needs to be migrated + ClusterName string `json:"clusterName,omitempty"` + + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the backup. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to include in the backup. + // The default value is "*". + // +optional + // +nullable + IncludedNamespaceScopedResources []string `json:"includedNamespaceScopedResources,omitempty"` + + // ExcludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to exclude from the backup. 
+ // If set to "*", all namespace-scoped resource types are excluded. + // The default value is empty. + // +optional + // +nullable + ExcludedNamespaceScopedResources []string `json:"excludedNamespaceScopedResources,omitempty"` + + // Rollback set true, then rollback from the backup + // +optional + // +nullable + Rollback string `json:"rollback,omitempty"` +} + +// PromotePolicyPhase is a string representation of the lifecycle phase +type PromotePolicyPhase string + +const ( + // PromotePolicyPhasePrecheck means in precheck progess + PromotePolicyPhasePrecheck PromotePolicyPhase = "Prechecking" + + // PromotePolicyPhaseFailedPrecheck means precheck has failed + PromotePolicyPhaseFailedPrecheck PromotePolicyPhase = "FailedPrecheck" + + // PromotePolicyPhaseBackup means in backup progess + PromotePolicyPhaseBackup PromotePolicyPhase = "Backuping" + + // PromotePolicyPhaseFailedBackup means backup has failed + PromotePolicyPhaseFailedBackup PromotePolicyPhase = "FailedBackup" + + // PromotePolicyPhaseDetach means in detach progess + PromotePolicyPhaseDetach PromotePolicyPhase = "Detaching" + + // PromotePolicyPhaseFailedDetach means detach has failed + PromotePolicyPhaseFailedDetach PromotePolicyPhase = "FailedDetach" + + // PromotePolicyPhaseRestore means in restore progess + PromotePolicyPhaseRestore PromotePolicyPhase = "Restoring" + + // PromotePolicyPhaseFailedRestore means restore has failed + PromotePolicyPhaseFailedRestore PromotePolicyPhase = "FailedRestore" + + // PromotePolicyPhaseFailedRollback means rollback has failed + PromotePolicyPhaseFailedRollback PromotePolicyPhase = "FailedRollback" + + // PromotePolicyPhaseRolledback means rollback has successed + PromotePolicyPhaseRolledback PromotePolicyPhase = "RolledBack" + + // PromotePolicyPhaseCompleted means the sync has run successfully + PromotePolicyPhaseCompleted PromotePolicyPhase = "Completed" +) + +// BackupProgress stores information about the progress of a Backup's execution. +type PromotePolicyProgress struct { + // TotalItems is the total number of items to be backed up. This number may change + // throughout the execution of the backup due to plugins that return additional related + // items to back up, the velero.io/exclude-from-backup label, and various other + // filters that happen as items are processed. + // +optional + TotalItems int `json:"totalItems,omitempty"` + + // ItemsBackedUp is the number of items that have actually been written to the + // backup tarball so far. + // +optional + ItemsBackedUp int `json:"itemsBackedUp,omitempty"` +} + +// PromotePolicyStatus defines the observed state of promotePolicy +type PromotePolicyStatus struct { + // Phase is the current state of the Backup. + // +optional + Phase PromotePolicyPhase `json:"phase,omitempty"` + + // PrecheckErrors is a slice of all precheck errors (if + // applicable). + // +optional + // +nullable + PrecheckErrors []string `json:"precheckErrors,omitempty"` + + // StartTimestamp records the time a sync was started. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a sync was completed. + // Completion time is recorded even on failed sync. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // FailureReason is an error that caused the entire sync to fail. 
+ // +optional + FailureReason string `json:"failureReason,omitempty"` + + // Progress contains information about the sync's execution progress. Note + // that this information is best-effort only -- if fails to update it for any reason, it may be inaccurate/stale. + // +optional + // +nullable + Progress *PromotePolicyProgress `json:"progress,omitempty"` + + BackedupFile string `json:"backedupFile,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups=velero.io,resources=backups/status,verbs=get;update;patch + +// PromotePolicy is custom resource that represents the capture of sync leaf cluster +type PromotePolicy struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PromotePolicySpec `json:"spec,omitempty"` + + Status PromotePolicyStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupList is a list of promotePolicys. +type PromotePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PromotePolicy `json:"items"` +} diff --git a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go index 79942057a..3af4fb91a 100644 --- a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go @@ -1331,6 +1331,153 @@ func (in *PromoteResources) DeepCopy() *PromoteResources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromotePolicy) DeepCopyInto(out *PromotePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicy. +func (in *PromotePolicy) DeepCopy() *PromotePolicy { + if in == nil { + return nil + } + out := new(PromotePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PromotePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromotePolicyList) DeepCopyInto(out *PromotePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PromotePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicyList. +func (in *PromotePolicyList) DeepCopy() *PromotePolicyList { + if in == nil { + return nil + } + out := new(PromotePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PromotePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromotePolicyProgress) DeepCopyInto(out *PromotePolicyProgress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicyProgress. +func (in *PromotePolicyProgress) DeepCopy() *PromotePolicyProgress { + if in == nil { + return nil + } + out := new(PromotePolicyProgress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromotePolicySpec) DeepCopyInto(out *PromotePolicySpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedNamespaceScopedResources != nil { + in, out := &in.IncludedNamespaceScopedResources, &out.IncludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaceScopedResources != nil { + in, out := &in.ExcludedNamespaceScopedResources, &out.ExcludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicySpec. +func (in *PromotePolicySpec) DeepCopy() *PromotePolicySpec { + if in == nil { + return nil + } + out := new(PromotePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromotePolicyStatus) DeepCopyInto(out *PromotePolicyStatus) { + *out = *in + if in.PrecheckErrors != nil { + in, out := &in.PrecheckErrors, &out.PrecheckErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Progress != nil { + in, out := &in.Progress, &out.Progress + *out = new(PromotePolicyProgress) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromotePolicyStatus. +func (in *PromotePolicyStatus) DeepCopy() *PromotePolicyStatus { + if in == nil { + return nil + } + out := new(PromotePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Proxy) DeepCopyInto(out *Proxy) { *out = *in diff --git a/pkg/apis/kosmos/v1alpha1/zz_generated.register.go b/pkg/apis/kosmos/v1alpha1/zz_generated.register.go index 9439e97d8..e2e0bb172 100644 --- a/pkg/apis/kosmos/v1alpha1/zz_generated.register.go +++ b/pkg/apis/kosmos/v1alpha1/zz_generated.register.go @@ -60,6 +60,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &NodeConfigList{}, &PodConvertPolicy{}, &PodConvertPolicyList{}, + &PromotePolicy{}, + &PromotePolicyList{}, &Proxy{}, &ShadowDaemonSet{}, &ShadowDaemonSetList{}, diff --git a/pkg/clustertree/cluster-manager/cluster_controller.go b/pkg/clustertree/cluster-manager/cluster_controller.go index f294e817f..8fa6d0195 100644 --- a/pkg/clustertree/cluster-manager/cluster_controller.go +++ b/pkg/clustertree/cluster-manager/cluster_controller.go @@ -12,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -141,6 +142,11 @@ func (c *ClusterController) Reconcile(ctx context.Context, request reconcile.Req return reconcile.Result{}, fmt.Errorf("could not build dynamic client for cluster %s: %v", cluster.Name, err) } + leafDiscovery, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not build discovery client for cluster %s: %v", cluster.Name, err) + } + leafKosmosClient, err := kosmosversioned.NewForConfig(config) if err != nil { return reconcile.Result{}, fmt.Errorf("could not build kosmos clientset for cluster %s: %v", cluster.Name, err) @@ -206,7 +212,7 @@ func (c *ClusterController) Reconcile(ctx context.Context, request reconcile.Req c.ManagerCancelFuncs[cluster.Name] = &cancel c.ControllerManagersLock.Unlock() - if err = c.setupControllers(mgr, cluster, nodes, leafDynamic, leafNodeSelectors, leafClient, leafKosmosClient, config); err != nil { + if err = c.setupControllers(mgr, cluster, nodes, leafDynamic, leafDiscovery, leafNodeSelectors, leafClient, leafKosmosClient, config); err != nil { return reconcile.Result{}, fmt.Errorf("failed to setup cluster %s controllers: %v", cluster.Name, err) } @@ -240,16 +246,18 @@ func (c *ClusterController) setupControllers( cluster *kosmosv1alpha1.Cluster, nodes []*corev1.Node, clientDynamic *dynamic.DynamicClient, + discoveryClient *discovery.DiscoveryClient, leafNodeSelector map[string]kosmosv1alpha1.NodeSelector, leafClientset kubernetes.Interface, leafKosmosClient kosmosversioned.Interface, leafRestConfig *rest.Config) error { c.GlobalLeafManager.AddLeafResource(&leafUtils.LeafResource{ - Client: mgr.GetClient(), - DynamicClient: clientDynamic, - Clientset: leafClientset, - KosmosClient: leafKosmosClient, - ClusterName: cluster.Name, + Client: mgr.GetClient(), + DynamicClient: clientDynamic, + DiscoveryClient: discoveryClient, + Clientset: leafClientset, + KosmosClient: leafKosmosClient, + ClusterName: cluster.Name, // TODO: define node options Namespace: "", IgnoreLabels: strings.Split("", ","), diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go index 0a368d63a..baf010abf 100644 --- a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go @@ -3,7 +3,6 @@ package pod import ( "context" - "github.com/google/go-cmp/cmp" 
"github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -147,12 +146,13 @@ func (r *LeafPodReconciler) SetupWithManager(mgr manager.Manager) error { return skipFunc(createEvent.Object) }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { - pod1 := updateEvent.ObjectOld.(*corev1.Pod) - pod2 := updateEvent.ObjectNew.(*corev1.Pod) - if !skipFunc(updateEvent.ObjectNew) { - return false - } - return !cmp.Equal(pod1.Status, pod2.Status) + return skipFunc(updateEvent.ObjectNew) + //pod1 := updateEvent.ObjectOld.(*corev1.Pod) + //pod2 := updateEvent.ObjectNew.(*corev1.Pod) + //if !skipFunc(updateEvent.ObjectNew) { + // return false + //} + //return !cmp.Equal(pod1.Status, pod2.Status) }, DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return skipFunc(deleteEvent.Object) diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go index 124b8f888..9a62939f8 100644 --- a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go @@ -214,6 +214,7 @@ func (r *RootPodReconciler) Reconcile(ctx context.Context, request reconcile.Req // update pod in leaf if podutils.ShouldEnqueue(leafPod, &rootpod) { if err := r.UpdatePodInLeafCluster(ctx, lr, &rootpod, leafPod, r.GlobalLeafManager.GetClusterNode(rootpod.Spec.NodeName).LeafNodeSelector); err != nil { + klog.Errorf("Error Update pod in leafcluster. %v", err) return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil } } @@ -559,73 +560,122 @@ func (r *RootPodReconciler) changeToMasterCoreDNS(ctx context.Context, pod *core } } -// projectedHandler Process the project volume, creating and mounting secret, configmap, DownwardAPI, +// createProjectedHandler Process the project volume, creating and mounting secret, configmap, DownwardAPI, // and ServiceAccountToken from the project volume in the member cluster to the pod of the host cluster -func (r *RootPodReconciler) projectedHandler(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) { +func (r *RootPodReconciler) createProjectedHandler(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) { if pod.Spec.Volumes == nil { return } for _, volume := range pod.Spec.Volumes { if volume.Projected != nil { - falseValue := false - pod.Spec.AutomountServiceAccountToken = &falseValue - - saName := pod.Spec.ServiceAccountName - var sources []corev1.VolumeProjection - - for _, projectedVolumeSource := range volume.Projected.Sources { - // Process all resources for the rootpod - if projectedVolumeSource.ServiceAccountToken != nil { - tokenSecretName, err := r.createSATokenInLeafCluster(ctx, lr, saName, pod) - if err != nil { - klog.Errorf("[convertAuth] create sa secret failed, ns: %s, pod: %s, err: %s", pod.Namespace, pod.Name, err) - return - } - secretProjection := corev1.VolumeProjection{ - Secret: &corev1.SecretProjection{ - Items: []corev1.KeyToPath{ - { - Key: "token", - Path: projectedVolumeSource.ServiceAccountToken.Path, - }, - }, - }, - } - secretProjection.Secret.Name = tokenSecretName - sources = append(sources, secretProjection) - } - if projectedVolumeSource.ConfigMap != nil { - cmName, err := r.createConfigMapInLeafCluster(ctx, lr, projectedVolumeSource.ConfigMap.Name, pod) - if err != nil { - klog.Errorf("[convertAuth] create configmap failed, ns: %s, cm: %s, err: %s", pod.Namespace, cmName, err) - return - } - cmDeepCopy := 
projectedVolumeSource.DeepCopy() - cmDeepCopy.ConfigMap.Name = cmName - sources = append(sources, *cmDeepCopy) - } - if projectedVolumeSource.Secret != nil { - Secret := projectedVolumeSource.Secret - seName, err := r.createSecretInLeafCluster(ctx, lr, Secret.Name, pod) - if err != nil { - klog.Errorf("[convertAuth] create secret failed, ns: %s, cm: %s, err: %s", pod.Namespace, seName, err) - return - } - secretDeepCopy := projectedVolumeSource.DeepCopy() - secretDeepCopy.Secret.Name = seName - sources = append(sources, *secretDeepCopy) - } - if projectedVolumeSource.DownwardAPI != nil { - DownwardAPIProjection := corev1.VolumeProjection{ - DownwardAPI: projectedVolumeSource.DownwardAPI, - } - sources = append(sources, DownwardAPIProjection) + if sources := r.projectedHandler(ctx, lr, volume, pod); sources != nil { + volume.Projected.Sources = sources + } + } + } +} + +// updateProjectedHandler update projected volume +func (r *RootPodReconciler) updateProjectedHandler(ctx context.Context, lr *leafUtils.LeafResource, rootPod, podCopy *corev1.Pod) { + if rootPod.Spec.Volumes == nil { + return + } + var leafPodVolumes []corev1.Volume + if podCopy.Spec.Volumes == nil { + leafPodVolumes = nil + } else { + leafPodVolumes = podCopy.Spec.Volumes + } + + var volumeCopy []corev1.Volume + + for _, volume := range rootPod.Spec.Volumes { + if volume.Projected != nil { + if _, flag := findVolumeInClient(volume, leafPodVolumes); !flag { + if sources := r.projectedHandler(ctx, lr, volume, podCopy); sources != nil { + volume.Projected.Sources = sources } } - volume.Projected.Sources = sources + } + volumeCopy = append(volumeCopy, volume) + } + podCopy.Spec.Volumes = volumeCopy +} + +func (r *RootPodReconciler) projectedHandler(ctx context.Context, lr *leafUtils.LeafResource, volume corev1.Volume, pod *corev1.Pod) []corev1.VolumeProjection { + falseValue := false + pod.Spec.AutomountServiceAccountToken = &falseValue + + saName := pod.Spec.ServiceAccountName + var sources []corev1.VolumeProjection + + for _, projectedVolumeSource := range volume.Projected.Sources { + // Process all resources for the rootpod + if projectedVolumeSource.ServiceAccountToken != nil { + tokenSecretName, err := r.createSATokenInLeafCluster(ctx, lr, saName, pod) + if err != nil { + klog.Errorf("[convertAuth] create sa secret failed, ns: %s, pod: %s, err: %s", pod.Namespace, pod.Name, err) + return nil + } + secretProjection := corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + Items: []corev1.KeyToPath{ + { + Key: "token", + Path: projectedVolumeSource.ServiceAccountToken.Path, + }, + }, + }, + } + secretProjection.Secret.Name = tokenSecretName + sources = append(sources, secretProjection) + } + if projectedVolumeSource.ConfigMap != nil { + cmName, err := r.createConfigMapInLeafCluster(ctx, lr, projectedVolumeSource.ConfigMap.Name, pod) + if err != nil { + klog.Errorf("[convertAuth] create configmap failed, ns: %s, cm: %s, err: %s", pod.Namespace, cmName, err) + return nil + } + cmDeepCopy := projectedVolumeSource.DeepCopy() + cmDeepCopy.ConfigMap.Name = cmName + sources = append(sources, *cmDeepCopy) + } + if projectedVolumeSource.Secret != nil { + Secret := projectedVolumeSource.Secret + seName, err := r.createSecretInLeafCluster(ctx, lr, Secret.Name, pod) + if err != nil { + klog.Errorf("[convertAuth] create secret failed, ns: %s, cm: %s, err: %s", pod.Namespace, seName, err) + return nil + } + secretDeepCopy := projectedVolumeSource.DeepCopy() + secretDeepCopy.Secret.Name = seName + sources = append(sources, 
*secretDeepCopy) + } + if projectedVolumeSource.DownwardAPI != nil { + DownwardAPIProjection := corev1.VolumeProjection{ + DownwardAPI: projectedVolumeSource.DownwardAPI, + } + sources = append(sources, DownwardAPIProjection) + } + } + return sources +} + +func findVolumeInClient(volumeInRoot corev1.Volume, volumes []corev1.Volume) (corev1.Volume, bool) { + if volumes == nil { + return corev1.Volume{}, false + } + + for _, volume := range volumes { + if volume.Projected != nil && volume.Name == volumeInRoot.Name { + if reflect.DeepEqual(volume.Projected, volumeInRoot.Projected) { + return volume, true + } } } + + return corev1.Volume{}, false } // createServiceAccountInLeafCluster Create an sa corresponding to token-secret in member cluster @@ -881,7 +931,7 @@ func (r *RootPodReconciler) CreatePodInLeafCluster(ctx context.Context, lr *leaf klog.V(4).Infof("Creating Volumes successed %+v", basicPod) } - r.projectedHandler(ctx, lr, basicPod) + r.createProjectedHandler(ctx, lr, basicPod) if !r.Options.MultiClusterService { r.changeToMasterCoreDNS(ctx, basicPod, r.Options) @@ -919,10 +969,11 @@ func (r *RootPodReconciler) UpdatePodInLeafCluster(ctx context.Context, lr *leaf if reflect.DeepEqual(leafPod.Spec, podCopy.Spec) && reflect.DeepEqual(leafPod.Annotations, podCopy.Annotations) && reflect.DeepEqual(leafPod.Labels, podCopy.Labels) { + klog.V(4).Info("Skipping leaf pod update") return nil } - r.projectedHandler(ctx, lr, podCopy) + r.updateProjectedHandler(ctx, lr, rootPod, podCopy) if !r.Options.MultiClusterService { r.changeToMasterCoreDNS(ctx, podCopy, r.Options) diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/backup.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/backup.go new file mode 100644 index 000000000..7e94ea2bc --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/backup.go @@ -0,0 +1,180 @@ +package backup + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + unstructured2 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + kubeerrs "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/constants" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/collections" +) + +type kubernetesBackupper struct { + request *requests.PromoteRequest + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + actions map[string]BackupItemAction + kubeclient kubernetes.Interface +} + +func NewKubernetesBackupper(request *requests.PromoteRequest) (*kubernetesBackupper, error) { + actions, err := registerBackupActions() + if err != nil { + return nil, err + } + dynamicFactory := client.NewDynamicFactory(request.LeafDynamicClient) + discoveryHelper, err := discovery.NewHelper(request.LeafDiscoveryClient) + if err != nil { + return nil, err + } + + return &kubernetesBackupper{ + request: request, + kubeclient: request.LeafClientSet, + dynamicFactory: dynamicFactory, + discoveryHelper: discoveryHelper, + actions: actions, + }, nil +} + +func (kb 
*kubernetesBackupper) Backup(backupFile io.Writer) error { + gzippedData := gzip.NewWriter(backupFile) + defer func(gzippedData *gzip.Writer) { + _ = gzippedData.Close() + }(gzippedData) + + tw := tar.NewWriter(gzippedData) + defer func(tw *tar.Writer) { + _ = tw.Close() + }(tw) + + klog.Info("Writing backup version file") + if err := kb.writeBackupVersion(tw); err != nil { + return errors.WithStack(err) + } + + kb.request.ResourceIncludesExcludes = collections.GetScopeResourceIncludesExcludes(kb.discoveryHelper, kb.request.Spec.IncludedNamespaceScopedResources, + kb.request.Spec.ExcludedNamespaceScopedResources, nil, nil, *kb.request.NamespaceIncludesExcludes) + + // set up a temp dir for the itemCollector to use to temporarily + // store items as they're scraped from the API. + tempDir, err := os.MkdirTemp("", "") + if err != nil { + return errors.Wrap(err, "error creating temp dir for backup") + } + defer func(path string) { + _ = os.RemoveAll(path) + }(tempDir) + + collector := &itemCollector{ + request: kb.request, + discoveryHelper: kb.discoveryHelper, + dynamicFactory: kb.dynamicFactory, + dir: tempDir, + } + + items := collector.getAllItems() + klog.Infof("Collected %d items matching the backup spec from the Kubernetes API (actual number of items backed up may be more or less depending on velero.io/exclude-from-backup annotation, plugins returning additional related items to back up, etc.)", len(items)) + + itemBackupper := &itemBackupper{ + request: kb.request, + backup: kb, + tarWriter: tw, + dynamicFactory: kb.dynamicFactory, + discoveryHelper: kb.discoveryHelper, + actions: kb.actions, + } + + backedUpGroupResources := map[schema.GroupResource]bool{} + + for _, item := range items { + klog.Infof("Processing item. resource: %s, namespace: %s, name: %s", item.groupResource.String(), item.namespace, item.name) + + // use an anonymous func so we can defer-close/remove the file + // as soon as we're done with it + func() { + var unstructured unstructured2.Unstructured + + f, err := os.Open(item.path) + if err != nil { + klog.Errorf("Error opening file containing item. %v", errors.WithStack(err)) + return + } + defer f.Close() + defer os.Remove(f.Name()) + + if err := json.NewDecoder(f).Decode(&unstructured); err != nil { + klog.Errorf("Error decoding JSON from file. %v", errors.WithStack(err)) + return + } + + if backedUp := kb.backupItem(item.groupResource, itemBackupper, &unstructured, item.preferredGVR); backedUp { + backedUpGroupResources[item.groupResource] = true + } + }() + } + + return nil +} + +func (kb *kubernetesBackupper) backupItem(gr schema.GroupResource, itemBackupper *itemBackupper, unstructured *unstructured2.Unstructured, preferredGVR schema.GroupVersionResource) bool { + backedUpItem, _, err := itemBackupper.backupItem(unstructured, gr, preferredGVR, true, false) + if aggregate, ok := err.(kubeerrs.Aggregate); ok { + klog.Infof("%d errors encountered backup up item %s", len(aggregate.Errors()), unstructured.GetName()) + // log each error separately so we get error location info in the log, and an + // accurate count of errors + for _, err = range aggregate.Errors() { + klog.Errorf("Error backing up item %s. %v", unstructured.GetName(), err) + } + + return false + } + + if err != nil { + klog.Errorf("Error backing up item %s. 
%v", unstructured.GetName(), err) + return false + } + return backedUpItem +} + +func (kb *kubernetesBackupper) writeBackupVersion(tw *tar.Writer) error { + versionFile := filepath.Join(constants.MetadataDir, "version") + versionString := fmt.Sprintf("%s\n", constants.BackupFormatVersion) + + hdr := &tar.Header{ + Name: versionFile, + Size: int64(len(versionString)), + Typeflag: tar.TypeReg, + Mode: 0755, + ModTime: time.Now(), + } + if err := tw.WriteHeader(hdr); err != nil { + return errors.WithStack(err) + } + if _, err := tw.Write([]byte(versionString)); err != nil { + return errors.WithStack(err) + } + + return nil +} + +type tarWriter interface { + io.Closer + Write([]byte) (int, error) + WriteHeader(*tar.Header) error +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/item_backupper.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/item_backupper.go new file mode 100644 index 000000000..5593250b5 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/item_backupper.go @@ -0,0 +1,191 @@ +package backup + +import ( + "archive/tar" + "time" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/archive" +) + +// itemBackupper can back up individual items to a tar writer. 
+type itemBackupper struct { + request *requests.PromoteRequest + backup *kubernetesBackupper + tarWriter tarWriter + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + actions map[string]BackupItemAction +} + +type FileForArchive struct { + FilePath string + Header *tar.Header + FileBytes []byte +} + +func (ib *itemBackupper) backupItem(obj runtime.Unstructured, groupResource schema.GroupResource, + preferredGVR schema.GroupVersionResource, mustInclude, finalize bool) (bool, []FileForArchive, error) { + selectedForBackup, files, err := ib.backupItemInternal(obj, groupResource, preferredGVR, mustInclude, finalize) + if !selectedForBackup || err != nil || len(files) == 0 || finalize { + return selectedForBackup, files, err + } + + for _, file := range files { + if err := ib.tarWriter.WriteHeader(file.Header); err != nil { + return false, []FileForArchive{}, errors.WithStack(err) + } + + if _, err := ib.tarWriter.Write(file.FileBytes); err != nil { + return false, []FileForArchive{}, errors.WithStack(err) + } + } + + return true, []FileForArchive{}, nil +} + +func (ib *itemBackupper) backupItemInternal(obj runtime.Unstructured, groupResource schema.GroupResource, + preferredGVR schema.GroupVersionResource, mustInclude, finalize bool) (bool, []FileForArchive, error) { + var itemFiles []FileForArchive + metadata, err := meta.Accessor(obj) + if err != nil { + return false, itemFiles, err + } + + namespace := metadata.GetNamespace() + name := metadata.GetName() + + key := types.ItemKey{ + Resource: groupResource.String(), + Namespace: namespace, + Name: name, + } + if _, exists := ib.request.BackedUpItems[key]; exists { + klog.Infof("Skipping item %s %s because it's already been backed up.", groupResource.String(), name) + return true, itemFiles, nil + } + ib.request.BackedUpItems[key] = struct{}{} + + klog.Infof("backup item name:%s, resouces: %s, namespace", name, groupResource.String(), namespace) + + if mustInclude { + klog.Infof("Skipping the exclusion checks for this resource") + } + + if metadata.GetDeletionTimestamp() != nil { + klog.Info("Skipping item because it's being deleted.") + return false, itemFiles, nil + } + + // capture the version of the object before invoking plugin actions as the plugin may update + // the group version of the object. + versionPath := resourceVersion(obj) + + updatedObj, additionalItemFiles, err := ib.executeActions(obj, groupResource, name, namespace) + if err != nil { + return false, itemFiles, errors.WithStack(err) + } + + itemFiles = append(itemFiles, additionalItemFiles...) 
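
Reviewer note: backupItemInternal above de-duplicates work through request.BackedUpItems, a set keyed by a comparable struct. A minimal standalone sketch of that pattern, with field names taken from the types.ItemKey usage in this hunk; the sample values and the main wrapper are illustrative only and not part of the PR:

package main

import "fmt"

// itemKey mirrors the comparable key used to skip items that were already
// written to the backup tarball.
type itemKey struct {
    Resource  string
    Namespace string
    Name      string
}

func main() {
    backedUpItems := map[itemKey]struct{}{}
    k := itemKey{Resource: "pods", Namespace: "test", Name: "nginx-0"}
    if _, exists := backedUpItems[k]; !exists {
        backedUpItems[k] = struct{}{} // first visit: record it, then back the item up
    }
    fmt.Println(len(backedUpItems)) // 1
}
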
+ obj = updatedObj + if metadata, err = meta.Accessor(obj); err != nil { + return false, itemFiles, errors.WithStack(err) + } + // update name and namespace in case they were modified in an action + name = metadata.GetName() + namespace = metadata.GetNamespace() + + itemBytes, err := json.Marshal(obj.UnstructuredContent()) + if err != nil { + return false, itemFiles, errors.WithStack(err) + } + + //if versionPath == preferredGVR.Version { + // // backing up preferred version backup without API Group version - for backward compatibility + // log.Infof("Resource %s/%s, version= %s, preferredVersion=%s", groupResource.String(), name, versionPath, preferredGVR.Version) + // itemFiles = append(itemFiles, getFileForArchive(namespace, name, groupResource.String(), "", itemBytes)) + // versionPath = versionPath + constants.PreferredVersionDir + //} + + itemFiles = append(itemFiles, getFileForArchive(namespace, name, groupResource.String(), versionPath, itemBytes)) + return true, itemFiles, nil +} + +func (ib *itemBackupper) executeActions(obj runtime.Unstructured, groupResource schema.GroupResource, + name, namespace string) (runtime.Unstructured, []FileForArchive, error) { + var itemFiles []FileForArchive + + if action, ok := ib.actions[groupResource.String()]; ok { + klog.Info("execute action for %s", groupResource.String()) + updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backup) + if err != nil { + return nil, itemFiles, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name) + } + u := &unstructured.Unstructured{Object: updatedItem.UnstructuredContent()} + obj = u + + for _, additionalItem := range additionalItemIdentifiers { + gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion("")) + if err != nil { + return nil, itemFiles, err + } + + client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace) + if err != nil { + return nil, itemFiles, err + } + + item, err := client.Get(additionalItem.Name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + klog.Warningf("Additional item was not found in Kubernetes API, can't back it up. groupResouces: %s, namespace: %s, name: %s", + additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name) + continue + } + if err != nil { + return nil, itemFiles, errors.WithStack(err) + } + + _, additionalItemFiles, err := ib.backupItem(item, gvr.GroupResource(), gvr, true, false) + if err != nil { + return nil, itemFiles, err + } + itemFiles = append(itemFiles, additionalItemFiles...) + } + } + + return obj, itemFiles, nil +} + +func getFileForArchive(namespace, name, groupResource, versionPath string, itemBytes []byte) FileForArchive { + filePath := archive.GetItemFilePath("", groupResource, namespace, name) + + hdr := &tar.Header{ + Name: filePath, + Size: int64(len(itemBytes)), + Typeflag: tar.TypeReg, + Mode: 0755, + ModTime: time.Now(), + } + return FileForArchive{FilePath: filePath, Header: hdr, FileBytes: itemBytes} +} + +// resourceVersion returns a string representing the object's API Version (e.g. 
+// v1 if item belongs to apps/v1 +func resourceVersion(obj runtime.Unstructured) string { + gvk := obj.GetObjectKind().GroupVersionKind() + return gvk.Version +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/item_collector.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/item_collector.go new file mode 100644 index 000000000..ba404747b --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/item_collector.go @@ -0,0 +1,326 @@ +package backup + +import ( + "os" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/collections" +) + +// itemCollector collects items from the Kubernetes API according to +// the backup spec and writes them to files inside dir. +type itemCollector struct { + request *requests.PromoteRequest + discoveryHelper discovery.Helper + dynamicFactory client.DynamicFactory + dir string +} + +type kubernetesResource struct { + groupResource schema.GroupResource + preferredGVR schema.GroupVersionResource + namespace, name, path string +} + +// These constants represent the relative priorities for resources in the core API group. We want to +// ensure that we process pods, then pvcs, then pvs, then anything else. This ensures that when a +// pod is backed up, we can perform a pre hook, then process pvcs and pvs (including taking a +// snapshot), then perform a post hook on the pod. +const ( + pod = iota + pvc + pv + other +) + +// getAllItems gets all relevant items from all API groups. +func (r *itemCollector) getAllItems() []*kubernetesResource { + var resources []*kubernetesResource + for _, group := range r.discoveryHelper.Resources() { + groupItems, err := r.getGroupItems(group) + if err != nil { + klog.Errorf("Error collecting resources from API group %s. %v", group.String(), err) + continue + } + + resources = append(resources, groupItems...) + } + + return resources +} + +// getGroupItems collects all relevant items from a single API group. +func (r *itemCollector) getGroupItems(group *metav1.APIResourceList) ([]*kubernetesResource, error) { + klog.Infof("Getting items for group %s", group.GroupVersion) + + // Parse so we can check if this is the core group + gv, err := schema.ParseGroupVersion(group.GroupVersion) + if err != nil { + return nil, errors.Wrapf(err, "error parsing GroupVersion %q", group.GroupVersion) + } + if gv.Group == "" { + // This is the core group, so make sure we process in the following order: pods, pvcs, pvs, else + sortCoreGroup(group) + } + + var items []*kubernetesResource + for _, resource := range group.APIResources { + resourceItems, err := r.getResourceItems(gv, resource) + if err != nil { + klog.Errorf("Error getting items for resource %s", resource.String()) + continue + } + + items = append(items, resourceItems...) 
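
Reviewer note: getGroupItems above decides whether to apply the pods -> pvcs -> pvs ordering by checking for the legacy core group. A minimal sketch of that check, assuming only the k8s.io/apimachinery schema package; the sample group versions are illustrative:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    for _, gvStr := range []string{"v1", "apps/v1"} {
        gv, err := schema.ParseGroupVersion(gvStr)
        if err != nil {
            panic(err)
        }
        // An empty Group identifies the legacy core group ("v1"); that is the
        // only case where sortCoreGroup reorders pods, pvcs and pvs first.
        fmt.Printf("%q -> group=%q core=%t\n", gvStr, gv.Group, gv.Group == "")
    }
}
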
+ } + + return items, nil +} + +// getResourceItems collects all relevant items for a given group-version-resource. +func (r *itemCollector) getResourceItems(gv schema.GroupVersion, resource metav1.APIResource) ([]*kubernetesResource, error) { + klog.Info("Getting items for resource %s", resource.Name) + + var ( + gvr = gv.WithResource(resource.Name) + gr = gvr.GroupResource() + ) + + //orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name) + + // Getting the preferred group version of this resource + preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion("")) + if err != nil { + return nil, errors.WithStack(err) + } + + if !r.request.ResourceIncludesExcludes.ShouldInclude(gr.String()) { + klog.Infof("Skipping resource %s because it's excluded", gr.String()) + return nil, nil + } + + //if cohabitator, found := r.cohabitatingResources[resource.Name]; found { + // if gv.Group == cohabitator.groupResource1.Group || gv.Group == cohabitator.groupResource2.Group { + // if cohabitator.seen { + // log.WithFields( + // logrus.Fields{ + // "cohabitatingResource1": cohabitator.groupResource1.String(), + // "cohabitatingResource2": cohabitator.groupResource2.String(), + // }, + // ).Infof("Skipping resource because it cohabitates and we've already processed it") + // return nil, nil + // } + // cohabitator.seen = true + // } + //} + + // Handle namespace resource here + if gr == kuberesource.Namespaces { + resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "") + if err != nil { + klog.Errorf("Error getting dynamic client. %v", errors.WithStack(err)) + return nil, errors.WithStack(err) + } + unstructuredList, err := resourceClient.List(metav1.ListOptions{}) + if err != nil { + klog.Errorf("Error list namespaces. %v", errors.WithStack(err)) + return nil, errors.WithStack(err) + } + + items := r.backupNamespaces(unstructuredList, r.request.NamespaceIncludesExcludes, gr, preferredGVR) + + return items, nil + } + + clusterScoped := !resource.Namespaced + namespacesToList := getNamespacesToList(r.request.NamespaceIncludesExcludes) + + if clusterScoped { + //namespacesToList = []string{""} + return nil, nil + } + + var items []*kubernetesResource + + for _, namespace := range namespacesToList { + // List items from kubernetes API + + resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace) + if err != nil { + klog.Errorf("Error getting dynamic client. 
%v", err) + continue + } + + var orLabelSelectors []string + //if r.backupRequest.Spec.OrLabelSelectors != nil { + // for _, s := range r.backupRequest.Spec.OrLabelSelectors { + // orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(s)) + // } + //} else { + // orLabelSelectors = []string{} + //} + + unstructuredItems := make([]unstructured.Unstructured, 0) + + // Listing items for orLabelSelectors + //errListingForNS := false + //for _, label := range orLabelSelectors { + // unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, label, resourceClient) + // if err != nil { + // errListingForNS = true + // } + //} + + //if errListingForNS { + // log.WithError(err).Error("Error listing items") + // continue + //} + + var labelSelector string + //if selector := r.backupRequest.Spec.LabelSelector; selector != nil { + // labelSelector = metav1.FormatLabelSelector(selector) + //} + + // Listing items for labelSelector (singular) + if len(orLabelSelectors) == 0 { + unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, labelSelector, resourceClient) + if err != nil { + klog.Errorf("Error listing items. %v", err) + continue + } + } + + // Collect items in included Namespaces + for i := range unstructuredItems { + item := &unstructuredItems[i] + + path, err := r.writeToFile(item) + if err != nil { + klog.Errorf("Error writing item to file. %v", err) + continue + } + + items = append(items, &kubernetesResource{ + groupResource: gr, + preferredGVR: preferredGVR, + namespace: item.GetNamespace(), + name: item.GetName(), + path: path, + }) + } + } + + //if len(orders) > 0 { + // items = sortResourcesByOrder(r.log, items, orders) + //} + + return items, nil +} + +func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstructured, gr schema.GroupResource, label string, resourceClient client.Dynamic) ([]unstructured.Unstructured, error) { + unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: label}) + if err != nil { + klog.Errorf("Error listing items. %v", errors.WithStack(err)) + return unstructuredItems, err + } + unstructuredItems = append(unstructuredItems, unstructuredList.Items...) + return unstructuredItems, nil +} + +// backupNamespaces process namespace resource according to namespace filters. +func (r *itemCollector) backupNamespaces(unstructuredList *unstructured.UnstructuredList, ie *collections.IncludesExcludes, + gr schema.GroupResource, preferredGVR schema.GroupVersionResource) []*kubernetesResource { + var items []*kubernetesResource + for index, unstructured := range unstructuredList.Items { + if ie.ShouldInclude(unstructured.GetName()) { + klog.Infof("Backup namespace %s.", unstructured.GetName()) + + path, err := r.writeToFile(&unstructuredList.Items[index]) + if err != nil { + klog.Errorf("Error writing item to file. 
%v", err) + continue + } + + items = append(items, &kubernetesResource{ + groupResource: gr, + preferredGVR: preferredGVR, + name: unstructured.GetName(), + path: path, + }) + } + } + + return items +} + +func (r *itemCollector) writeToFile(item *unstructured.Unstructured) (string, error) { + logrus.Infof("dir path: %s", r.dir) + f, err := os.CreateTemp(r.dir, "") + if err != nil { + return "", errors.Wrap(err, "error creating temp file") + } + defer f.Close() + + jsonBytes, err := json.Marshal(item) + if err != nil { + return "", errors.Wrap(err, "error convering item to JSON") + } + + if _, err := f.Write(jsonBytes); err != nil { + return "", errors.Wrap(err, "error writing JSON to file") + } + + if err := f.Close(); err != nil { + return "", errors.Wrap(err, "error closing file") + } + + return f.Name(), nil +} + +// SortCoreGroup sorts the core API group +func sortCoreGroup(group *metav1.APIResourceList) { + sort.SliceStable(group.APIResources, func(i, j int) bool { + return coreGroupResourcePriority(group.APIResources[i].Name) < coreGroupResourcePriority(group.APIResources[j].Name) + }) +} + +func coreGroupResourcePriority(resource string) int { + switch strings.ToLower(resource) { + case "pods": + return pod + case "persistentvolumeclaims": + return pvc + case "persistentvolumes": + return pv + } + + return other +} + +func getNamespacesToList(ie *collections.IncludesExcludes) []string { + if ie == nil { + return []string{""} + } + + var list []string + for _, i := range ie.GetIncludes() { + if ie.ShouldInclude(i) { + list = append(list, i) + } + } + + return list +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/pvc_backup_action.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/pvc_backup_action.go new file mode 100644 index 000000000..10705a023 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/pvc_backup_action.go @@ -0,0 +1,40 @@ +package backup + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" +) + +type PVCAction struct { +} + +func NewPVCAction() *PVCAction { + return &PVCAction{} +} + +func (p *PVCAction) Resource() string { + return "persistentvolumeclaims" +} + +func (s *PVCAction) Execute(item runtime.Unstructured, backup *kubernetesBackupper) (runtime.Unstructured, []requests.ResourceIdentifier, error) { + var pvc corev1.PersistentVolumeClaim + + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), &pvc); err != nil { + return nil, nil, errors.Wrap(err, "unable to convert unstructured item to persistent volume claim") + } + + if pvc.Status.Phase != corev1.ClaimBound || pvc.Spec.VolumeName == "" { + return item, nil, nil + } + + pv := requests.ResourceIdentifier{ + GroupResource: kuberesource.PersistentVolumes, + Name: pvc.Spec.VolumeName, + } + + return item, []requests.ResourceIdentifier{pv}, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/rbac.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/rbac.go new file mode 100644 index 000000000..456ae3864 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/rbac.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "context" + + "github.com/pkg/errors" + rbac "k8s.io/api/rbac/v1" + rbacbeta "k8s.io/api/rbac/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1" + rbacbetaclient "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" +) + +// ClusterRoleBindingLister allows for listing ClusterRoleBindings in a version-independent way. +type ClusterRoleBindingLister interface { + // List returns a slice of ClusterRoleBindings which can represent either v1 or v1beta1 ClusterRoleBindings. + List() ([]ClusterRoleBinding, error) +} + +// noopClusterRoleBindingLister exists to handle clusters where RBAC is disabled. +type noopClusterRoleBindingLister struct { +} + +func (noop noopClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) { + return []ClusterRoleBinding{}, nil +} + +type v1ClusterRoleBindingLister struct { + client rbacclient.ClusterRoleBindingInterface +} + +func (v1 v1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) { + crbList, err := v1.client.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, errors.WithStack(err) + } + var crbs []ClusterRoleBinding + for _, crb := range crbList.Items { + crbs = append(crbs, v1ClusterRoleBinding{crb: crb}) + } + + return crbs, nil +} + +type v1beta1ClusterRoleBindingLister struct { + client rbacbetaclient.ClusterRoleBindingInterface +} + +func (v1beta1 v1beta1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) { + crbList, err := v1beta1.client.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, errors.WithStack(err) + } + var crbs []ClusterRoleBinding + for _, crb := range crbList.Items { + crbs = append(crbs, v1beta1ClusterRoleBinding{crb: crb}) + } + + return crbs, nil +} + +// NewClusterRoleBindingListerMap creates a map of RBAC version strings to their associated +// ClusterRoleBindingLister structs. +// Necessary so that callers to the ClusterRoleBindingLister interfaces don't need the kubernetes.Interface. +func NewClusterRoleBindingListerMap(clientset kubernetes.Interface) map[string]ClusterRoleBindingLister { + return map[string]ClusterRoleBindingLister{ + rbac.SchemeGroupVersion.Version: v1ClusterRoleBindingLister{client: clientset.RbacV1().ClusterRoleBindings()}, + rbacbeta.SchemeGroupVersion.Version: v1beta1ClusterRoleBindingLister{client: clientset.RbacV1beta1().ClusterRoleBindings()}, + "": noopClusterRoleBindingLister{}, + } +} + +// ClusterRoleBinding abstracts access to ClusterRoleBindings whether they're v1 or v1beta1. +type ClusterRoleBinding interface { + // Name returns the name of a ClusterRoleBinding. + Name() string + // ServiceAccountSubjects returns the names of subjects that are service accounts in the given namespace. + ServiceAccountSubjects(namespace string) []string + // RoleRefName returns the name of a ClusterRoleBinding's RoleRef. 
+ RoleRefName() string +} + +type v1ClusterRoleBinding struct { + crb rbac.ClusterRoleBinding +} + +func (c v1ClusterRoleBinding) Name() string { + return c.crb.Name +} + +func (c v1ClusterRoleBinding) RoleRefName() string { + return c.crb.RoleRef.Name +} + +func (c v1ClusterRoleBinding) ServiceAccountSubjects(namespace string) []string { + var saSubjects []string + for _, s := range c.crb.Subjects { + if s.Kind == rbac.ServiceAccountKind && s.Namespace == namespace { + saSubjects = append(saSubjects, s.Name) + } + } + return saSubjects +} + +type v1beta1ClusterRoleBinding struct { + crb rbacbeta.ClusterRoleBinding +} + +func (c v1beta1ClusterRoleBinding) Name() string { + return c.crb.Name +} + +func (c v1beta1ClusterRoleBinding) RoleRefName() string { + return c.crb.RoleRef.Name +} + +func (c v1beta1ClusterRoleBinding) ServiceAccountSubjects(namespace string) []string { + var saSubjects []string + for _, s := range c.crb.Subjects { + if s.Kind == rbac.ServiceAccountKind && s.Namespace == namespace { + saSubjects = append(saSubjects, s.Name) + } + } + return saSubjects +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/register_action.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/register_action.go new file mode 100644 index 000000000..bdecee744 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/register_action.go @@ -0,0 +1,58 @@ +package backup + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" +) + +// BackupItemAction is an actor that performs an operation on an individual item being backed up. +type BackupItemAction interface { + + // return resource.group + Resource() string + + // Execute allows the ItemAction to perform arbitrary logic with the item being backed up, + // including mutating the item itself prior to backup. The item (unmodified or modified) + // should be returned, along with an optional slice of ResourceIdentifiers specifying + // additional related items that should be backed up. 
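+	// For example, the PVC action returns the bound PersistentVolume as an additional
+	// item so the volume is captured together with its claim, and the service account
+	// action returns the ClusterRoleBindings and ClusterRoles that reference it.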
+	Execute(item runtime.Unstructured, backup *kubernetesBackupper) (runtime.Unstructured, []requests.ResourceIdentifier, error)
+}
+
+func registerBackupActions() (map[string]BackupItemAction, error) {
+	actionMap := make(map[string]BackupItemAction, 3)
+
+	if err := registerBackupItemAction(actionMap, newPvcBackupItemAction); err != nil {
+		return nil, err
+	}
+
+	if err := registerBackupItemAction(actionMap, newServiceAccountBackupItemAction); err != nil {
+		return nil, err
+	}
+
+	return actionMap, nil
+}
+
+func registerBackupItemAction(actionsMap map[string]BackupItemAction, initializer types.HandlerInitializer) error {
+	instance, err := initializer()
+	if err != nil {
+		return errors.WithMessage(err, "init backup action instance error")
+	}
+
+	itemAction, ok := instance.(BackupItemAction)
+	if !ok {
+		return errors.Errorf("%T is not a backup item action", instance)
+	}
+	actionsMap[itemAction.Resource()] = itemAction
+	return nil
+}
+
+func newPvcBackupItemAction() (interface{}, error) {
+	return NewPVCAction(), nil
+}
+
+func newServiceAccountBackupItemAction() (interface{}, error) {
+	return NewServiceAccountAction(), nil
+}
diff --git a/pkg/clustertree/cluster-manager/controllers/promote/backup/service_account_backup_action.go b/pkg/clustertree/cluster-manager/controllers/promote/backup/service_account_backup_action.go
new file mode 100644
index 000000000..a351f2f8c
--- /dev/null
+++ b/pkg/clustertree/cluster-manager/controllers/promote/backup/service_account_backup_action.go
@@ -0,0 +1,110 @@
+package backup
+
+import (
+	"github.com/pkg/errors"
+	rbac "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+
+	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource"
+	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests"
+)
+
+// ServiceAccountAction implements BackupItemAction
+type ServiceAccountAction struct {
+	clusterRoleBindings []ClusterRoleBinding
+	fetched             bool
+}
+
+func NewServiceAccountAction() *ServiceAccountAction {
+	return &ServiceAccountAction{
+		fetched: false,
+	}
+}
+
+func (s *ServiceAccountAction) Resource() string {
+	return "serviceaccounts"
+}
+
+// Execute checks for any ClusterRoleBindings that have this service account as a subject, and
+// adds the ClusterRoleBinding and associated ClusterRole to the list of additional items to
+// be backed up.
+func (s *ServiceAccountAction) Execute(item runtime.Unstructured, backup *kubernetesBackupper) (runtime.Unstructured, []requests.ResourceIdentifier, error) {
+	klog.Info("Running ServiceAccountAction")
+	defer klog.Info("Done running ServiceAccountAction")
+
+	if !s.fetched {
+		err := s.fetchClusterRoleBindings(backup)
+		if err != nil {
+			return nil, nil, errors.WithMessage(err, "fetchClusterRoleBindings error")
+		}
+		s.fetched = true
+	}
+
+	objectMeta, err := meta.Accessor(item)
+	if err != nil {
+		return nil, nil, errors.WithStack(err)
+	}
+
+	var (
+		namespace = objectMeta.GetNamespace()
+		name      = objectMeta.GetName()
+		bindings  = sets.NewString()
+		roles     = sets.NewString()
+	)
+
+	for _, crb := range s.clusterRoleBindings {
+		for _, subject := range crb.ServiceAccountSubjects(namespace) {
+			if subject == name {
+				klog.Infof("Adding clusterrole %s and clusterrolebinding %s to additionalItems since serviceaccount %s/%s is a subject",
+					crb.RoleRefName(), crb.Name(), namespace, name)
+
+				bindings.Insert(crb.Name())
+				roles.Insert(crb.RoleRefName())
+			}
+		}
+	}
+
+	var additionalItems []requests.ResourceIdentifier
+	for binding := range bindings {
+		additionalItems = append(additionalItems, requests.ResourceIdentifier{
+			GroupResource: kuberesource.ClusterRoleBindings,
+			Name:          binding,
+		})
+	}
+
+	for role := range roles {
+		additionalItems = append(additionalItems, requests.ResourceIdentifier{
+			GroupResource: kuberesource.ClusterRoles,
+			Name:          role,
+		})
+	}
+
+	return item, additionalItems, nil
+}
+
+func (s *ServiceAccountAction) fetchClusterRoleBindings(backup *kubernetesBackupper) error {
+	clusterRoleBindingListers := NewClusterRoleBindingListerMap(backup.kubeclient)
+	// Look up the supported RBAC version
+	var supportedAPI metav1.GroupVersionForDiscovery
+	for _, ag := range backup.discoveryHelper.APIGroups() {
+		if ag.Name == rbac.GroupName {
+			supportedAPI = ag.PreferredVersion
+			break
+		}
+	}
+
+	crbLister := clusterRoleBindingListers[supportedAPI.Version]
+
+	// This should be safe because the List call will return a 0-item slice if there is no matching API version
+	crbs, err := crbLister.List()
+	if err != nil {
+		return err
+	}
+
+	s.clusterRoleBindings = crbs
+	return nil
+}
diff --git a/pkg/clustertree/cluster-manager/controllers/promote/client/dynamic.go b/pkg/clustertree/cluster-manager/controllers/promote/client/dynamic.go
new file mode 100644
index 000000000..0e9655b11
--- /dev/null
+++ b/pkg/clustertree/cluster-manager/controllers/promote/client/dynamic.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2017 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package client + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" +) + +// DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and +// GroupVersionKinds. +type DynamicFactory interface { + // ClientForGroupVersionResource returns a Dynamic client for the given group/version + // and resource for the given namespace. + ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error) + // DynamicSharedInformerFactoryForNamespace returns a DynamicSharedInformerFactory for the given namespace. + DynamicSharedInformerFactoryForNamespace(namespace string) dynamicinformer.DynamicSharedInformerFactory +} + +// dynamicFactory implements DynamicFactory. +type dynamicFactory struct { + dynamicClient dynamic.Interface +} + +// NewDynamicFactory returns a new ClientPool-based dynamic factory. +func NewDynamicFactory(dynamicClient dynamic.Interface) DynamicFactory { + return &dynamicFactory{dynamicClient: dynamicClient} +} + +func (f *dynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error) { + return &dynamicResourceClient{ + resourceClient: f.dynamicClient.Resource(gv.WithResource(resource.Name)).Namespace(namespace), + }, nil +} + +func (f *dynamicFactory) DynamicSharedInformerFactoryForNamespace(namespace string) dynamicinformer.DynamicSharedInformerFactory { + return dynamicinformer.NewFilteredDynamicSharedInformerFactory(f.dynamicClient, time.Minute, namespace, nil) +} + +// Creator creates an object. +type Creator interface { + // Create creates an object. + Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) +} + +// Lister lists objects. +type Lister interface { + // List lists all the objects of a given resource. + List(metav1.ListOptions) (*unstructured.UnstructuredList, error) +} + +// Watcher watches objects. +type Watcher interface { + // Watch watches for changes to objects of a given resource. + Watch(metav1.ListOptions) (watch.Interface, error) +} + +// Getter gets an object. +type Getter interface { + // Get fetches an object by name. + Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) +} + +// Patcher patches an object. +type Patcher interface { + //Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. + + Patch(name string, data []byte) (*unstructured.Unstructured, error) +} + +// Deletor deletes an object. +type Deletor interface { + //Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. + + Delete(name string, opts metav1.DeleteOptions) error +} + +// StatusUpdater updates status field of a object +type StatusUpdater interface { + UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) +} + +// Dynamic contains client methods that Velero needs for backing up and restoring resources. +type Dynamic interface { + Creator + Lister + Watcher + Getter + Patcher + Deletor + StatusUpdater +} + +// dynamicResourceClient implements Dynamic. 
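+//
+// An illustrative (non-normative) call sequence for obtaining and using a client:
+//
+//	factory := NewDynamicFactory(dynamicClient)
+//	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
+//	apiResource := metav1.APIResource{Name: "deployments", Namespaced: true}
+//	c, err := factory.ClientForGroupVersionResource(gv, apiResource, "default")
+//	if err != nil { /* handle error */ }
+//	list, err := c.List(metav1.ListOptions{})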
+type dynamicResourceClient struct { + resourceClient dynamic.ResourceInterface +} + +var _ Dynamic = &dynamicResourceClient{} + +func (d *dynamicResourceClient) Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + return d.resourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) +} + +func (d *dynamicResourceClient) List(options metav1.ListOptions) (*unstructured.UnstructuredList, error) { + return d.resourceClient.List(context.TODO(), options) +} + +func (d *dynamicResourceClient) Watch(options metav1.ListOptions) (watch.Interface, error) { + return d.resourceClient.Watch(context.TODO(), options) +} + +func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) { + return d.resourceClient.Get(context.TODO(), name, opts) +} + +func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) { + return d.resourceClient.Patch(context.TODO(), name, types.MergePatchType, data, metav1.PatchOptions{}) +} + +func (d *dynamicResourceClient) Delete(name string, opts metav1.DeleteOptions) error { + return d.resourceClient.Delete(context.TODO(), name, opts) +} + +func (d *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { + return d.resourceClient.UpdateStatus(context.TODO(), obj, opts) +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/constants/constants.go b/pkg/clustertree/cluster-manager/controllers/promote/constants/constants.go new file mode 100644 index 000000000..5e45909c2 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/constants/constants.go @@ -0,0 +1,29 @@ +package constants + +const ( + BackupDir = "/data/backup/" + + // ResourcesDir is a top-level directory expected in backups which contains sub-directories + // for each resource type in the backup. + ResourcesDir = "resources" + + // MetadataDir is a top-level directory expected in backups which contains + // files that store metadata about the backup, such as the backup version. 
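+	//
+	// Taken together, these constants describe a Velero-style backup layout, roughly
+	// (illustrative sketch only):
+	//
+	//	metadata/                       - backup metadata such as the format version
+	//	resources/<resource>.<group>/
+	//	    cluster/                    - cluster-scoped items
+	//	    namespaces/<namespace>/     - namespace-scoped items
+	//	    <version>-preferredversion  - preferred API version of the group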
+ MetadataDir = "metadata" + + // ClusterScopedDir is the name of the directory containing cluster-scoped + ClusterScopedDir = "cluster" + + // NamespaceScopedDir is the name of the directory containing namespace-scoped + NamespaceScopedDir = "namespaces" + + BackupFormatVersion = "1.1.0" + + // PreferredVersionDir is the suffix name of the directory containing the preferred version of the API group + PreferredVersionDir = "-preferredversion" + + ItemRestoreResultCreated = "created" + ItemRestoreResultUpdated = "updated" + ItemRestoreResultFailed = "failed" + ItemRestoreResultSkipped = "skipped" +) diff --git a/pkg/clustertree/cluster-manager/controllers/promote/detach/detach.go b/pkg/clustertree/cluster-manager/controllers/promote/detach/detach.go new file mode 100644 index 000000000..91fc5fce7 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/detach/detach.go @@ -0,0 +1,492 @@ +package detach + +import ( + "io" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/archive" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/kube" +) + +// detach order, crd is detached first +var defaultDetachPriorities = []schema.GroupResource{ + kuberesource.StatefulSets, + kuberesource.Deployments, + kuberesource.ReplicaSets, + kuberesource.Services, + kuberesource.PersistentVolumeClaims, + kuberesource.PersistentVolumes, + kuberesource.ServiceAccounts, + kuberesource.Configmaps, + kuberesource.Secrets, + kuberesource.Roles, + kuberesource.RoleBindings, + kuberesource.Pods, +} + +var defaultUndetachPriorities = []schema.GroupResource{ + kuberesource.Pods, + kuberesource.RoleBindings, + kuberesource.Roles, + kuberesource.Configmaps, + kuberesource.Secrets, + kuberesource.ServiceAccounts, + kuberesource.PersistentVolumes, + kuberesource.PersistentVolumeClaims, + kuberesource.Services, + kuberesource.ReplicaSets, + kuberesource.Deployments, + kuberesource.StatefulSets, +} + +type kubernetesDetacher struct { + request *requests.PromoteRequest + discoveryHelper discovery.Helper + dynamicFactory client.DynamicFactory // used for connect leaf cluster + fileSystem filesystem.Interface + backupReader io.Reader + resourceClients map[resourceClientKey]client.Dynamic + detachDir string + actions map[string]DetachItemAction + kosmosClusterName string + ownerItems map[ownerReferenceKey]struct{} +} + +type ownerReferenceKey struct { + apiVersion string + kind string +} + +func NewKubernetesDetacher(request *requests.PromoteRequest, backupReader io.Reader) (*kubernetesDetacher, error) { + actions, 
err := registerDetachActions() + if err != nil { + return nil, err + } + dynamicFactory := client.NewDynamicFactory(request.LeafDynamicClient) + discoveryHelper, err := discovery.NewHelper(request.LeafDiscoveryClient) + if err != nil { + return nil, err + } + + return &kubernetesDetacher{ + request: request, + discoveryHelper: discoveryHelper, + dynamicFactory: dynamicFactory, + fileSystem: filesystem.NewFileSystem(), + backupReader: backupReader, + resourceClients: make(map[resourceClientKey]client.Dynamic), + actions: actions, + kosmosClusterName: request.Spec.ClusterName, + }, nil +} + +// restoreableResource represents map of individual items of each resource +// identifier grouped by their original namespaces. +type detachableResource struct { + resource string + selectedItemsByNamespace map[string][]detachableItem + totalItems int +} + +type detachableItem struct { + path string + targetNamespace string + name string + version string // used for initializing informer cache +} + +type resourceClientKey struct { + resource schema.GroupVersionResource + namespace string +} + +func (d *kubernetesDetacher) Detach() error { + defer func() { + // todo rollback if needed? + }() + + dir, err := archive.NewExtractor(d.fileSystem).UnzipAndExtractBackup(d.backupReader) + if err != nil { + return errors.Errorf("error unzipping and extracting: %v", err) + } + defer func() { + if err := d.fileSystem.RemoveAll(dir); err != nil { + klog.Errorf("error removing temporary directory %s: %s", dir, err.Error()) + } + }() + + // Need to set this for additionalItems to be restored. + d.detachDir = dir + d.ownerItems = map[ownerReferenceKey]struct{}{} + + backupResources, err := archive.NewParser(d.fileSystem).Parse(d.detachDir) + if err != nil { + return errors.Errorf("error parse detachDir %s: %v", d.detachDir, err) + } + klog.Infof("total backup resources size: %v", len(backupResources)) + + resourceCollection, err := d.getOrderedResourceCollection(backupResources, defaultDetachPriorities) + if err != nil { + return err + } + + for _, selectedResource := range resourceCollection { + err = d.processSelectedResource(selectedResource) + if err != nil { + return err + } + } + + return nil +} + +func (d *kubernetesDetacher) processSelectedResource(selectedResource detachableResource) error { + groupResource := schema.ParseGroupResource(selectedResource.resource) + + for _, selectedItems := range selectedResource.selectedItemsByNamespace { + for _, selectedItem := range selectedItems { + obj, err := archive.Unmarshal(d.fileSystem, selectedItem.path) + if err != nil { + return errors.Errorf("error decoding %q: %v", strings.Replace(selectedItem.path, d.detachDir+"/", "", -1), err) + } + + err = d.detachItem(obj, groupResource, selectedItem.targetNamespace) + if err != nil { + return errors.Wrap(err, "detachItem error") + } + + item := types.ItemKey{ + Resource: groupResource.String(), + Name: selectedItem.name, + Namespace: selectedItem.targetNamespace, + } + d.request.DetachedItems[item] = struct{}{} + } + } + return nil +} + +func (d *kubernetesDetacher) detachItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) error { + resourceClient, err := d.getResourceClient(groupResource, obj, namespace) + if err != nil { + return errors.Wrap(err, "getResourceClient error") + } + + klog.Infof("detach resource %s, name: %s, namespace: %s", groupResource.String(), obj.GetName(), obj.GetNamespace()) + + if action, ok := d.actions[groupResource.String()]; ok { + err := action.Execute(obj, 
resourceClient, d) + if err != nil { + return errors.Errorf("%s detach action error: %v", groupResource.String(), err) + } + return nil + } else { + klog.Infof("no action found for resource %s, delete it", groupResource.String()) + updatedOwnerObj := obj.DeepCopy() + if updatedOwnerObj.GetFinalizers() != nil { + updatedOwnerObj.SetFinalizers(nil) + patchBytes, err := generatePatch(obj, updatedOwnerObj) + if err != nil { + return errors.Wrap(err, "error generating patch") + } + + _, err = resourceClient.Patch(updatedOwnerObj.GetName(), patchBytes) + if err != nil { + return errors.Wrapf(err, "error patch %s %s", groupResource.String(), updatedOwnerObj.GetName()) + } + } + + deleteGraceSeconds := int64(0) + err = resourceClient.Delete(updatedOwnerObj.GetName(), metav1.DeleteOptions{GracePeriodSeconds: &deleteGraceSeconds}) + if err != nil { + return errors.Wrapf(err, "error delete %s %s", groupResource.String(), updatedOwnerObj.GetName()) + } + } + return nil +} + +func (d *kubernetesDetacher) Rollback(allDetached bool) error { + dir, err := archive.NewExtractor(d.fileSystem).UnzipAndExtractBackup(d.backupReader) + if err != nil { + return errors.Errorf("error unzipping and extracting: %v", err) + } + defer func() { + if err := d.fileSystem.RemoveAll(dir); err != nil { + klog.Errorf("error removing temporary directory %s: %s", dir, err.Error()) + } + }() + + d.detachDir = dir + d.ownerItems = map[ownerReferenceKey]struct{}{} + backupedResources, err := archive.NewParser(d.fileSystem).Parse(d.detachDir) + if err != nil { + return errors.Errorf("error parse detachDir %s: %v", d.detachDir, err) + } + klog.Infof("total backup resources size: %v", len(backupedResources)) + + resourceCollection, err := d.getOrderedResourceCollection(backupedResources, defaultUndetachPriorities) + if err != nil { + return err + } + + for _, selectedResource := range resourceCollection { + err = d.rollbackSelectedResource(selectedResource, allDetached) + if err != nil { + return err + } + } + + return nil +} + +func (d *kubernetesDetacher) rollbackSelectedResource(selectedResource detachableResource, allDetached bool) error { + groupResource := schema.ParseGroupResource(selectedResource.resource) + + for _, selectedItems := range selectedResource.selectedItemsByNamespace { + for _, selectedItem := range selectedItems { + if !allDetached { + item := types.ItemKey{ + Resource: groupResource.String(), + Name: selectedItem.name, + Namespace: selectedItem.targetNamespace, + } + + if _, ok := d.request.DetachedItems[item]; !ok { + // undetached resource, doesn't need to handle + continue + } + } + + obj, err := archive.Unmarshal(d.fileSystem, selectedItem.path) + if err != nil { + return errors.Errorf("error decoding %q: %v", strings.Replace(selectedItem.path, d.detachDir+"/", "", -1), err) + } + + err = d.undetachItem(obj, groupResource, selectedItem.targetNamespace) + if err != nil { + return errors.Wrap(err, "UndetachItem error") + } + } + } + return nil +} + +func (d *kubernetesDetacher) undetachItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) error { + resourceClient, err := d.getResourceClient(groupResource, obj, namespace) + if err != nil { + return errors.Wrap(err, "getResourceClient error") + } + + klog.Infof("Undetach resource %s, name: %s", groupResource.String(), obj.GetName()) + + if action, ok := d.actions[groupResource.String()]; ok { + err = action.Revert(obj, resourceClient, d) + if err != nil { + return errors.Errorf("%s Undetach action error: %v", 
groupResource.String(), err) + } + + return nil + } else { + klog.Infof("no action found for resource %s, create it immediately", groupResource.String()) + newObj := obj.DeepCopy() + newObj, err := kube.ResetMetadataAndStatus(newObj) + if err != nil { + return errors.Wrapf(err, "reset %s %s metadata error", obj.GroupVersionKind().String(), obj.GetName()) + } + + _, err = resourceClient.Create(newObj) + if err != nil { + if apierrors.IsAlreadyExists(err) { + klog.Infof("resource %s is already exist. skip create", newObj.GetName()) + return nil + } + return errors.Wrap(err, "create resource "+newObj.GetName()+" failed.") + } + } + return nil +} + +func (d *kubernetesDetacher) getOrderedResourceCollection( + backupResources map[string]*archive.ResourceItems, + groupResourcePriorities []schema.GroupResource, +) ([]detachableResource, error) { + detachResourceCollection := make([]detachableResource, 20) + + for _, groupResource := range groupResourcePriorities { + // try to resolve the resource via discovery to a complete group/version/resource + _, _, err := d.discoveryHelper.ResourceFor(groupResource.WithVersion("")) + if err != nil { + klog.Infof("Skipping restore of resource %s because it cannot be resolved via discovery", groupResource.String()) + continue + } + + // Check if the resource is present in the backup + resourceList := backupResources[groupResource.String()] + if resourceList == nil { + klog.Infof("Skipping restore of resource %s because it's not present in the backup tarball", groupResource.String()) + continue + } + + // Iterate through each namespace that contains instances of the + // resource and append to the list of to-be restored resources. + for namespace, items := range resourceList.ItemsByNamespace { + res, err := d.getSelectedDetachableItems(groupResource.String(), namespace, items) + if err != nil { + return nil, err + } + + detachResourceCollection = append(detachResourceCollection, res) + } + } + + for owner := range d.ownerItems { + klog.Infof("ownerReference: %s %s", owner.apiVersion, owner.kind) + gvk := schema.FromAPIVersionAndKind(owner.apiVersion, owner.kind) + gvr, _, err := d.discoveryHelper.KindFor(gvk) + if err != nil { + return nil, errors.Wrapf(err, "resource %s cannot be resolved via discovery", gvk.String()) + } + + resourceList := backupResources[gvr.GroupResource().String()] + if resourceList == nil { + klog.Infof("Skipping restore of resource %s because it's not present in the backup tarball", gvr.GroupResource().String()) + continue + } + + for namespace, items := range resourceList.ItemsByNamespace { + res, err := d.getSelectedDetachableItems(gvr.GroupResource().String(), namespace, items) + if err != nil { + return nil, err + } + + detachResourceCollection = append(detachResourceCollection, res) + } + } + + return detachResourceCollection, nil +} + +// getSelectedDetachableItems applies Kubernetes selectors on individual items +// of each resource type to create a list of items which will be actually +// restored. 
+func (d *kubernetesDetacher) getSelectedDetachableItems(resource string, namespace string, items []string) (detachableResource, error) { + detachable := detachableResource{ + resource: resource, + selectedItemsByNamespace: make(map[string][]detachableItem), + } + + targetNamespace := namespace + if targetNamespace != "" { + klog.Infof("Resource '%s' will be restored into namespace '%s'", resource, targetNamespace) + } else { + klog.Infof("Resource '%s' will be restored at cluster scope", resource) + } + + resourceForPath := resource + + for _, item := range items { + itemPath := archive.GetItemFilePath(d.detachDir, resourceForPath, namespace, item) + + obj, err := archive.Unmarshal(d.fileSystem, itemPath) + if err != nil { + return detachable, errors.Errorf("error decoding %q: %v", strings.Replace(itemPath, d.detachDir+"/", "", -1), err) + } + + if resource == kuberesource.Namespaces.String() { + // handle remapping for namespace resource + targetNamespace = item + } + + selectedItem := detachableItem{ + path: itemPath, + name: item, + targetNamespace: targetNamespace, + version: obj.GroupVersionKind().Version, + } + detachable.selectedItemsByNamespace[namespace] = + append(detachable.selectedItemsByNamespace[namespace], selectedItem) + detachable.totalItems++ + + if resource == kuberesource.StatefulSets.String() || resource == kuberesource.Deployments.String() { + for _, owner := range obj.GetOwnerReferences() { + ownerKey := ownerReferenceKey{ + apiVersion: owner.APIVersion, + kind: owner.Kind, + } + + d.ownerItems[ownerKey] = struct{}{} + } + } + } + return detachable, nil +} + +// generatePatch will calculate a JSON merge patch for an object's desired state. +// If the passed in objects are already equal, nil is returned. +func generatePatch(fromCluster, desired *unstructured.Unstructured) ([]byte, error) { + // If the objects are already equal, there's no need to generate a patch. + if equality.Semantic.DeepEqual(fromCluster, desired) { + return nil, nil + } + + desiredBytes, err := json.Marshal(desired.Object) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal desired object") + } + + fromClusterBytes, err := json.Marshal(fromCluster.Object) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal in-cluster object") + } + + patchBytes, err := jsonpatch.CreateMergePatch(fromClusterBytes, desiredBytes) + if err != nil { + return nil, errors.Wrap(err, "unable to create merge patch") + } + + return patchBytes, nil +} + +func (d *kubernetesDetacher) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) { + key := resourceClientKey{ + resource: groupResource.WithVersion(obj.GroupVersionKind().Version), + namespace: namespace, + } + + if client, ok := d.resourceClients[key]; ok { + return client, nil + } + + // Initialize client for this resource. We need metadata from an object to + // do this. 
+ klog.Infof("Getting client for %v", obj.GroupVersionKind()) + + resource := metav1.APIResource{ + Namespaced: len(namespace) > 0, + Name: groupResource.Resource, + } + + clientForGroupVersionResource, err := d.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) + if err != nil { + return nil, err + } + + d.resourceClients[key] = clientForGroupVersionResource + return clientForGroupVersionResource, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/detach/pod_detach_action.go b/pkg/clustertree/cluster-manager/controllers/promote/detach/pod_detach_action.go new file mode 100644 index 000000000..5cf0ce8b3 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/detach/pod_detach_action.go @@ -0,0 +1,99 @@ +package detach + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" +) + +// SecretAction is a restore item action for secrets +type PodAction struct { +} + +func NewPodAction() *PodAction { + return &PodAction{} +} + +func (p *PodAction) Resource() []string { + return []string{"pods"} +} + +func (p *PodAction) Execute(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error { + updatedPod := new(corev1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, updatedPod); err != nil { + return err + } + + labels := updatedPod.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + if _, ok := labels["kosmos-io/pod"]; ok { + return nil + } else { + labels["kosmos-io/pod"] = "true" + podMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPod) + if err != nil { + return errors.Wrap(err, "unable to convert pod to unstructured item") + } + patchBytes, err := generatePatch(obj, &unstructured.Unstructured{Object: podMap}) + if err != nil { + return errors.Wrap(err, "error generating patch") + } + if patchBytes == nil { + klog.Warningf("the same pod obj, %s", updatedPod.Name) + return nil + } + + _, err = client.Patch(updatedPod.Name, patchBytes) + return err + } +} + +func (p *PodAction) Revert(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error { + fromCluster, err := client.Get(obj.GetName(), metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Warningf("resource %s %s not found. 
skip undetach", obj.GroupVersionKind().String(), obj.GetName()) + return nil + } else { + return errors.Wrapf(err, "get resource %s %s failed.", obj.GroupVersionKind().String(), obj.GetName()) + } + } + + updatedPod := new(corev1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(fromCluster.Object, updatedPod); err != nil { + return err + } + labels := updatedPod.GetLabels() + if labels != nil { + if _, ok := labels["kosmos-io/pod"]; ok { + delete(labels, "kosmos-io/pod") + delete(labels, "kosmos-io/synced") + updatedPod.SetLabels(labels) + podMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPod) + if err != nil { + return errors.Wrap(err, "unable to convert pod to unstructured item") + } + patchBytes, err := generatePatch(fromCluster, &unstructured.Unstructured{Object: podMap}) + if err != nil { + return errors.Wrap(err, "error generating patch") + } + if patchBytes == nil { + klog.Warningf("the same pod obj, %s", updatedPod.Name) + return nil + } + + _, err = client.Patch(updatedPod.Name, patchBytes) + return err + } + } + + return nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/detach/register_action.go b/pkg/clustertree/cluster-manager/controllers/promote/detach/register_action.go new file mode 100644 index 000000000..e3e29400c --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/detach/register_action.go @@ -0,0 +1,66 @@ +package detach + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" +) + +// BackupItemAction is an actor that performs an operation on an individual item being backed up. +type DetachItemAction interface { + // return resource.group + Resource() []string + + // Execute allows the ItemAction to perform arbitrary logic with the item being backed up, + // including mutating the item itself prior to backup. The item (unmodified or modified) + // should be returned, along with an optional slice of ResourceIdentifiers specifying + // additional related items that should be backed up. 
+	Execute(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error
+
+	Revert(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error
+}
+
+func registerDetachActions() (map[string]DetachItemAction, error) {
+	actionMap := make(map[string]DetachItemAction, 3)
+
+	if err := registerDetachItemAction(actionMap, newPodDetachItemAction); err != nil {
+		return nil, err
+	}
+	if err := registerDetachItemAction(actionMap, newUniversalDetachItemAction); err != nil {
+		return nil, err
+	}
+	if err := registerDetachItemAction(actionMap, newStsDeployDetachItemAction); err != nil {
+		return nil, err
+	}
+	return actionMap, nil
+}
+
+func registerDetachItemAction(actionsMap map[string]DetachItemAction, initializer types.HandlerInitializer) error {
+	instance, err := initializer()
+	if err != nil {
+		return errors.WithMessage(err, "init detach action instance error")
+	}
+
+	itemAction, ok := instance.(DetachItemAction)
+	if !ok {
+		return errors.Errorf("%T is not a detach item action", instance)
+	}
+	for _, resource := range itemAction.Resource() {
+		actionsMap[resource] = itemAction
+	}
+	return nil
+}
+
+func newPodDetachItemAction() (interface{}, error) {
+	return NewPodAction(), nil
+}
+
+func newUniversalDetachItemAction() (interface{}, error) {
+	return NewUniversalAction(), nil
+}
+
+func newStsDeployDetachItemAction() (interface{}, error) {
+	return NewStsDeployAction(), nil
+}
diff --git a/pkg/clustertree/cluster-manager/controllers/promote/detach/sts_deploy_detach_action.go b/pkg/clustertree/cluster-manager/controllers/promote/detach/sts_deploy_detach_action.go
new file mode 100644
index 000000000..d22b68c79
--- /dev/null
+++ b/pkg/clustertree/cluster-manager/controllers/promote/detach/sts_deploy_detach_action.go
@@ -0,0 +1,59 @@
+package detach
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/klog"
+
+	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client"
+	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/kube"
+)
+
+type StsDeployAction struct {
+}
+
+func NewStsDeployAction() *StsDeployAction {
+	return &StsDeployAction{}
+}
+
+func (p *StsDeployAction) Resource() []string {
+	return []string{"statefulsets.apps", "deployments.apps", "replicasets.apps"}
+}
+
+func (p *StsDeployAction) Execute(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error {
+	// Delete the statefulset/deployment/replicaset while orphaning its dependents, so the pods keep running.
+	orphanOption := metav1.DeletePropagationOrphan
+	err := client.Delete(obj.GetName(), metav1.DeleteOptions{PropagationPolicy: &orphanOption})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			klog.Warningf("resource %s not found, skip delete", obj.GetName())
+			return nil
+		} else {
+			return errors.Wrap(err, "DeletePropagationOrphan err")
+		}
+	}
+	return nil
+}
+
+func (p *StsDeployAction) Revert(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error {
+	newObj := obj.DeepCopy()
+	newObj, err := kube.ResetMetadataAndStatus(newObj)
+	if err != nil {
+		return errors.Wrapf(err, "reset %s %s metadata error", obj.GroupVersionKind().String(), obj.GetName())
+	}
+
+	_, err = client.Create(newObj)
+	if err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			klog.Infof("resource %s already exists. 
skip create", newObj.GetName()) + return nil + } + return errors.Wrap(err, "create resource "+newObj.GetName()+" failed.") + } + time.Sleep(5 * time.Second) + return nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/detach/universal_detach_action.go b/pkg/clustertree/cluster-manager/controllers/promote/detach/universal_detach_action.go new file mode 100644 index 000000000..a79ed0728 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/detach/universal_detach_action.go @@ -0,0 +1,112 @@ +package detach + +import ( + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" +) + +type UniversalAction struct { +} + +func NewUniversalAction() *UniversalAction { + return &UniversalAction{} +} + +func (p *UniversalAction) Resource() []string { + return []string{"services", "persistentvolumeclaims", "persistentvolumes", "configmaps", "secrets", "serviceaccounts", + "roles.rbac.authorization.k8s.io", "rolebindings.rbac.authorization.k8s.io"} +} + +func (p *UniversalAction) Execute(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error { + updatedObj := obj.DeepCopy() + objectMeta, err := meta.Accessor(updatedObj) + if err != nil { + return errors.WithStack(err) + } + + annotations := objectMeta.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + + var key, val string + if obj.GetKind() == "Service" { + key = "kosmos.io/auto-create-mcs" + val = "true" + } else { + key = "kosmos-io/cluster-owners" + val = detacher.kosmosClusterName + } + + _, ok := annotations[key] + if updatedObj.GetOwnerReferences() != nil || !ok { + annotations[key] = val + updatedObj.SetAnnotations(annotations) + updatedObj.SetOwnerReferences(nil) + patchBytes, err := generatePatch(obj, updatedObj) + if err != nil { + return errors.Wrap(err, "error generating patch") + } + if patchBytes == nil { + klog.Warningf("the same obj, %s", objectMeta.GetName()) + } + + _, err = client.Patch(objectMeta.GetName(), patchBytes) + return err + } + + return nil +} + +//nolint:gosec // No need to check. +func (p *UniversalAction) Revert(obj *unstructured.Unstructured, client client.Dynamic, detacher *kubernetesDetacher) error { + fromCluster, err := client.Get(obj.GetName(), metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Warningf("resource %s not found. 
skip undetach", obj.GroupVersionKind().String(), obj.GetName()) + return nil + } else { + return errors.Wrapf(err, "get resource %s %s failed.", obj.GroupVersionKind().String(), obj.GetName()) + } + } + + updatedObj := fromCluster.DeepCopy() + objectMeta, err := meta.Accessor(updatedObj) + if err != nil { + return errors.WithStack(err) + } + + annotations := objectMeta.GetAnnotations() + if annotations != nil { + var key string + if obj.GetKind() == "Service" { + key = "kosmos.io/auto-create-mcs" + } else { + key = "kosmos-io/cluster-owners" + } + + if _, ok := annotations[key]; ok { + delete(annotations, key) + updatedObj.SetAnnotations(annotations) + patchBytes, err := generatePatch(fromCluster, updatedObj) + if err != nil { + return errors.Wrap(err, "error generating patch") + } + if patchBytes == nil { + klog.Warningf("the same obj, %s", objectMeta.GetName()) + return nil + } + + _, err = client.Patch(objectMeta.GetName(), patchBytes) + return err + } + } + + return nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/discovery/helper.go b/pkg/clustertree/cluster-manager/controllers/promote/discovery/helper.go new file mode 100644 index 000000000..0ecb6cba5 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/discovery/helper.go @@ -0,0 +1,289 @@ +/* +Copyright 2017, 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "sort" + "strings" + "sync" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/restmapper" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils" +) + +//go:generate mockery --name Helper + +// Helper exposes functions for interacting with the Kubernetes discovery +// API. +type Helper interface { + // Resources gets the current set of resources retrieved from discovery + // that are backuppable by Velero. + Resources() []*metav1.APIResourceList + + // ResourceFor gets a fully-resolved GroupVersionResource and an + // APIResource for the provided partially-specified GroupVersionResource. + ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, metav1.APIResource, error) + + // KindFor gets a fully-resolved GroupVersionResource and an + // APIResource for the provided partially-specified GroupVersionKind. + KindFor(input schema.GroupVersionKind) (schema.GroupVersionResource, metav1.APIResource, error) + + // Refresh pulls an updated set of Velero-backuppable resources from the + // discovery API. + Refresh() error + + // APIGroups gets the current set of supported APIGroups + // in the cluster. + APIGroups() []metav1.APIGroup + + // ServerVersion retrieves and parses the server's k8s version (git version) + // in the cluster. 
+ ServerVersion() *version.Info +} + +type serverResourcesInterface interface { + // ServerPreferredResources() is used to populate Resources() with only Preferred Versions - this is the default + ServerPreferredResources() ([]*metav1.APIResourceList, error) + // ServerGroupsAndResources returns supported groups and resources for *all* groups and versions + // Used to populate Resources() if feature flag is passed + ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) +} + +type helper struct { + discoveryClient discovery.DiscoveryInterface + + // lock guards mapper, resources and resourcesMap + lock sync.RWMutex + mapper meta.RESTMapper + resources []*metav1.APIResourceList + resourcesMap map[schema.GroupVersionResource]metav1.APIResource + kindMap map[schema.GroupVersionKind]metav1.APIResource + apiGroups []metav1.APIGroup + serverVersion *version.Info +} + +var _ Helper = &helper{} + +func NewHelper(discoveryClient discovery.DiscoveryInterface) (Helper, error) { + h := &helper{ + discoveryClient: discoveryClient, + } + if err := h.Refresh(); err != nil { + return nil, err + } + return h, nil +} + +func (h *helper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, metav1.APIResource, error) { + h.lock.RLock() + defer h.lock.RUnlock() + + gvr, err := h.mapper.ResourceFor(input) + if err != nil { + return schema.GroupVersionResource{}, metav1.APIResource{}, err + } + + apiResource, found := h.resourcesMap[gvr] + if !found { + return schema.GroupVersionResource{}, metav1.APIResource{}, errors.Errorf("APIResource not found for GroupVersionResource %s", gvr) + } + + return gvr, apiResource, nil +} + +func (h *helper) KindFor(input schema.GroupVersionKind) (schema.GroupVersionResource, metav1.APIResource, error) { + h.lock.RLock() + defer h.lock.RUnlock() + + if resource, ok := h.kindMap[input]; ok { + return schema.GroupVersionResource{ + Group: resource.Group, + Version: resource.Version, + Resource: resource.Name, + }, resource, nil + } + m, err := h.mapper.RESTMapping(schema.GroupKind{Group: input.Group, Kind: input.Kind}, input.Version) + if err != nil { + return schema.GroupVersionResource{}, metav1.APIResource{}, err + } + if resource, ok := h.kindMap[m.GroupVersionKind]; ok { + return schema.GroupVersionResource{ + Group: resource.Group, + Version: resource.Version, + Resource: resource.Name, + }, resource, nil + } + return schema.GroupVersionResource{}, metav1.APIResource{}, errors.Errorf("APIResource not found for GroupVersionKind %v ", input) +} + +func (h *helper) Refresh() error { + h.lock.Lock() + defer h.lock.Unlock() + + groupResources, err := restmapper.GetAPIGroupResources(h.discoveryClient) + if err != nil { + return errors.WithStack(err) + } + + var serverResources []*metav1.APIResourceList + + // ServerPreferredResources() returns only preferred APIGroup - this is the default since no feature flag has been passed + serverPreferredResources, err := refreshServerPreferredResources(h.discoveryClient) + if err != nil { + return errors.WithStack(err) + } + serverResources = serverPreferredResources + + h.resources = discovery.FilteredBy( + And(filterByVerbs, skipSubresource), + serverResources, + ) + + sortResources(h.resources) + + shortcutExpander, err := utils.NewShortcutExpander(restmapper.NewDiscoveryRESTMapper(groupResources), h.resources) + if err != nil { + return errors.WithStack(err) + } + h.mapper = shortcutExpander + + h.resourcesMap = make(map[schema.GroupVersionResource]metav1.APIResource) + h.kindMap = 
make(map[schema.GroupVersionKind]metav1.APIResource) + for _, resourceGroup := range h.resources { + gv, err := schema.ParseGroupVersion(resourceGroup.GroupVersion) + if err != nil { + return errors.Wrapf(err, "unable to parse GroupVersion %s", resourceGroup.GroupVersion) + } + + for _, resource := range resourceGroup.APIResources { + gvr := gv.WithResource(resource.Name) + gvk := gv.WithKind(resource.Kind) + resource.Group = gv.Group + resource.Version = gv.Version + h.resourcesMap[gvr] = resource + h.kindMap[gvk] = resource + } + } + + apiGroupList, err := h.discoveryClient.ServerGroups() + if err != nil { + return errors.WithStack(err) + } + h.apiGroups = apiGroupList.Groups + + serverVersion, err := h.discoveryClient.ServerVersion() + if err != nil { + return errors.WithStack(err) + } + + h.serverVersion = serverVersion + + return nil +} + +func refreshServerPreferredResources(discoveryClient serverResourcesInterface) ([]*metav1.APIResourceList, error) { + preferredResources, err := discoveryClient.ServerPreferredResources() + if err != nil { + if discoveryErr, ok := err.(*discovery.ErrGroupDiscoveryFailed); ok { + for groupVersion, err := range discoveryErr.Groups { + klog.Warningf("Failed to discover group: %v. %v", groupVersion, err) + } + return preferredResources, nil + } + } + return preferredResources, err +} + +// And returns a composite predicate that implements a logical AND of the predicates passed to it. +func And(predicates ...discovery.ResourcePredicateFunc) discovery.ResourcePredicate { + return and{predicates} +} + +type and struct { + predicates []discovery.ResourcePredicateFunc +} + +func (a and) Match(groupVersion string, r *metav1.APIResource) bool { + for _, p := range a.predicates { + if !p(groupVersion, r) { + return false + } + } + + return true +} + +func filterByVerbs(groupVersion string, r *metav1.APIResource) bool { + return discovery.SupportsAllVerbs{Verbs: []string{"list", "create", "get", "delete"}}.Match(groupVersion, r) +} + +func skipSubresource(_ string, r *metav1.APIResource) bool { + // if we have a slash, then this is a subresource and we shouldn't include it. + return !strings.Contains(r.Name, "/") +} + +// sortResources sources resources by moving extensions to the end of the slice. The order of all +// the other resources is preserved. 
+func sortResources(resources []*metav1.APIResourceList) { + sort.SliceStable(resources, func(i, j int) bool { + left := resources[i] + leftGV, _ := schema.ParseGroupVersion(left.GroupVersion) + // not checking error because it should be impossible to fail to parse data coming from the + // apiserver + if leftGV.Group == "extensions" { + // always sort extensions at the bottom by saying left is "greater" + return false + } + + right := resources[j] + rightGV, _ := schema.ParseGroupVersion(right.GroupVersion) + // not checking error because it should be impossible to fail to parse data coming from the + // apiserver + if rightGV.Group == "extensions" { + // always sort extensions at the bottom by saying left is "less" + return true + } + + return i < j + }) +} + +func (h *helper) Resources() []*metav1.APIResourceList { + h.lock.RLock() + defer h.lock.RUnlock() + return h.resources +} + +func (h *helper) APIGroups() []metav1.APIGroup { + h.lock.RLock() + defer h.lock.RUnlock() + return h.apiGroups +} + +func (h *helper) ServerVersion() *version.Info { + h.lock.RLock() + defer h.lock.RUnlock() + return h.serverVersion +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/kuberesource/kuberesource.go b/pkg/clustertree/cluster-manager/controllers/promote/kuberesource/kuberesource.go new file mode 100644 index 000000000..a4f065697 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/kuberesource/kuberesource.go @@ -0,0 +1,45 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kuberesource + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + ClusterRoleBindings = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterrolebindings"} + ClusterRoles = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterroles"} + RoleBindings = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "rolebindings"} + Roles = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "roles"} + CustomResourceDefinitions = schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"} + Jobs = schema.GroupResource{Group: "batch", Resource: "jobs"} + Namespaces = schema.GroupResource{Group: "", Resource: "namespaces"} + PersistentVolumeClaims = schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"} + PersistentVolumes = schema.GroupResource{Group: "", Resource: "persistentvolumes"} + Pods = schema.GroupResource{Group: "", Resource: "pods"} + Configmaps = schema.GroupResource{Group: "", Resource: "configmaps"} + ServiceAccounts = schema.GroupResource{Group: "", Resource: "serviceaccounts"} + Secrets = schema.GroupResource{Group: "", Resource: "secrets"} + StatefulSets = schema.GroupResource{Group: "apps", Resource: "statefulsets"} + Deployments = schema.GroupResource{Group: "apps", Resource: "deployments"} + ReplicaSets = schema.GroupResource{Group: "apps", Resource: "replicasets"} + Services = schema.GroupResource{Group: "", Resource: "services"} + VolumeSnapshotClasses = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshotclasses"} + VolumeSnapshots = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshots"} + VolumeSnapshotContents = schema.GroupResource{Group: "snapshot.storage.k8s.io", Resource: "volumesnapshotcontents"} + PriorityClasses = schema.GroupResource{Group: "scheduling.k8s.io", Resource: "priorityclasses"} +) diff --git a/pkg/clustertree/cluster-manager/controllers/promote/precheck/precheck.go b/pkg/clustertree/cluster-manager/controllers/promote/precheck/precheck.go new file mode 100644 index 000000000..9ee9e891c --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/precheck/precheck.go @@ -0,0 +1,321 @@ +package precheck + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils" + constants "github.com/kosmos.io/kosmos/pkg/utils" +) + +type kubernetesPrecheck struct { + request *requests.PromoteRequest +} + +func NewKubernetesPrecheck(request *requests.PromoteRequest) (*kubernetesPrecheck, error) { + if request != nil { + return &kubernetesPrecheck{request: request}, nil + } else { + return nil, fmt.Errorf("request is nil") + } +} +func (kb *kubernetesPrecheck) Precheck() error { + // check namespace + err := checkNamespaces(kb.request, kb.request.ForbidNamespaces) + if err != nil { + return err + } + + // check ApiResources + err = checkApiResources(kb.request) + if err != nil { + return err + } + + return nil +} + +func checkApiResources(request *requests.PromoteRequest) error { + // judge k8s version + leafVersion, err := 
request.LeafDiscoveryClient.ServerVersion() + if err != nil { + return err + } + rootVersion, err := request.RootDiscoveryClient.ServerVersion() + if err != nil { + return err + } + if !strings.EqualFold(leafVersion.GitVersion, rootVersion.GitVersion) { + return fmt.Errorf("kubernetes version is not same in leaf cluster and rootcluster") + } + + includedResources := request.Spec.IncludedNamespaceScopedResources + excludedResources := request.Spec.ExcludedNamespaceScopedResources + + for _, excludedResource := range excludedResources { + if excludedResource == "*" { + return fmt.Errorf("precheck failed, excluded resources has \"*\" ") + } + } + + for _, includedResource := range includedResources { + // add all resources to includedResources + if includedResource == "*" { + // gets all preferred api resources for the leaf cluster + leafApiResourcesMap, err := getApiResourcesMap(request.LeafClientSet) + if err != nil { + return fmt.Errorf("precheck failed, getApiResourcesMap in leaf cluster fauled, err: %s", err) + } + var tmp []string + for name := range leafApiResourcesMap { + tmp = append(tmp, name) + } + includedResources = tmp + break + } + } + + // needsStringMap is excludedResources converted into map + excludeMap, err := utils.ToMapSetE(excludedResources) + if err != nil { + return fmt.Errorf("includedResources convert to map failed, err: %s", err) + } + excludeStringMap := make(map[string]string) + for _, value := range excludeMap.(map[interface{}]interface{}) { + valueString := value.(string) + excludeStringMap[valueString] = valueString + } + + // get all native api resources + nativeApiResourcesMap, err := getNativeApiResourcesMap(request.LeafClientSet, request.LeafDynamicClient) + if err != nil { + return fmt.Errorf("get native api resource failed, err: %s", err) + } + + // get all crds in leaf + leafCRDList, err := listCRD(request.LeafDynamicClient) + if err != nil { + return fmt.Errorf("leaf client get crd failed, err: %s", err) + } + leafCRDMap, err := utils.ToMapSetE(leafCRDList) + if err != nil { + return fmt.Errorf("includedResources convert to map failed, err: %s", err) + } + leafCRDStringMap := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, value := range leafCRDMap.(map[interface{}]interface{}) { + crd := value.(*apiextensionsv1.CustomResourceDefinition) + leafCRDStringMap[crd.Name] = crd + } + + // get all crds in root + rootCRDList, err := listCRD(request.RootDynamicClient) + if err != nil { + return fmt.Errorf("root client get crd failed, err: %s", err) + } + rootCRDMap, err := utils.ToMapSetE(rootCRDList) + if err != nil { + return fmt.Errorf("includedResources convert to map failed, err: %s", err) + } + rootCRDStringMap := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, value := range rootCRDMap.(map[interface{}]interface{}) { + crd := value.(*apiextensionsv1.CustomResourceDefinition) + rootCRDStringMap[crd.Name] = crd + } + + // judge whether the preferred version of resources for root cluster and leaf cluster is the same + for _, indcludeResource := range includedResources { + // not judge excluded resource + if _, ok := excludeStringMap[indcludeResource]; ok { + continue + } + // not judge native api resource + if _, ok := nativeApiResourcesMap[indcludeResource]; ok { + continue + } + + leafCRD, ok := leafCRDStringMap[indcludeResource] + if ok { + return fmt.Errorf("crd %s do not exist in the leaf cluster", leafCRD.Name) + } + rootCRD, ok := rootCRDStringMap[indcludeResource] + if ok { + return fmt.Errorf("crd %s do not 
exist in the root cluster", rootCRD.Name) + } + if !strings.EqualFold(leafCRD.Spec.Versions[0].Name, rootCRD.Spec.Versions[0].Name) { + return fmt.Errorf("crd %s version is different in that it is %s in leaf cluster and %s in root cluster", + rootCRD.Name, leafCRD.Spec.Versions[0].Name, rootCRD.Spec.Versions[0].Name) + } + } + + return nil +} + +func getNativeApiResourcesMap(clientSet kubernetes.Interface, dynamicClient dynamic.Interface) (map[string]string, error) { + nativeApiResourcesMap, err := getApiResourcesMap(clientSet) + if err != nil { + return nil, fmt.Errorf("precheck failed, getApiResourcesMap in leaf cluster fauled, err: %s", err) + } + + leafCRDList, err := listCRD(dynamicClient) + if err != nil { + return nil, fmt.Errorf("leaf client get crd failed, err: %s", err) + } + for _, crd := range leafCRDList { + delete(nativeApiResourcesMap, crd.Name) + } + return nativeApiResourcesMap, nil +} + +// getApiResourcesMap gets all preferred api resources for cluster +func getApiResourcesMap(clientSet kubernetes.Interface) (map[string]string, error) { + apiResources, err := clientSet.Discovery().ServerPreferredResources() + if err != nil { + return nil, fmt.Errorf("get api-reources in leaf failed, err: %s", err) + } + apiResourcesMap, err := utils.ToMapSetE(apiResources) + if err != nil { + return nil, fmt.Errorf("apiResources convert to map failed, err: %s", err) + } + apiResourcesStringMap := make(map[string]string) + for _, value := range apiResourcesMap.(map[interface{}]interface{}) { + valueString := value.(*metav1.APIResourceList) + groupVersion := valueString.GroupVersion + var group, version string + if i := strings.Index(valueString.GroupVersion, "/"); i >= 0 { + group = groupVersion[:i] + version = groupVersion[i+1:] + } else { + group = "" + version = groupVersion + } + for _, resource := range valueString.APIResources { + nameGroup := resource.Name + if group != "" { + nameGroup = fmt.Sprintf("%s.%s", nameGroup, group) + } + apiResourcesStringMap[nameGroup] = version + } + } + return apiResourcesStringMap, nil +} + +func checkNamespaces(request *requests.PromoteRequest, forbidNamespace []string) error { + includes := request.NamespaceIncludesExcludes.GetIncludes() + excludes := request.NamespaceIncludesExcludes.GetExcludes() + leafNamespaceList, err := request.LeafClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + rootNamespaceList, err := request.RootClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + // it is meaningless to include * in exclude + for _, exclude := range excludes { + if exclude == "*" { + return fmt.Errorf("precheck failed, excludes has \"*\" ") + } + } + + for _, include := range includes { + // add all resources to includes + if include == "*" { + var tmp []string + for _, item := range leafNamespaceList.Items { + tmp = append(tmp, item.Name) + } + includes = tmp + break + } + } + + // needsStringMap removes namespace from exclude + needsMap, err := utils.ToMapSetE(includes) + if err != nil { + return fmt.Errorf("includes convert to map failed, err: %s", err) + } + needsStringMap := make(map[string]string) + for _, value := range needsMap.(map[interface{}]interface{}) { + valueString := value.(string) + needsStringMap[valueString] = valueString + } + + for _, exclude := range excludes { + value, found := needsStringMap[exclude] + if !found { + return fmt.Errorf("excludes has wrong namespace: %s", value) + } + delete(needsStringMap, 
exclude) + } + + for _, forbid := range forbidNamespace { + if _, ok := needsStringMap[forbid]; ok { + return fmt.Errorf("promote this %s namesapcethe is forbidden", forbid) + } + } + + // judge whether the leaf cluster contains the namespace + leafNamespaceMap, err := utils.ToMapSetE(leafNamespaceList.Items) + if err != nil { + return fmt.Errorf("leafNamespaceList convert to map failed, err: %s", err) + } + leafNamespaceStringMap := make(map[string]corev1.Namespace) + for _, value := range leafNamespaceMap.(map[interface{}]interface{}) { + namespace := value.(corev1.Namespace) + leafNamespaceStringMap[namespace.Name] = namespace + } + + for _, need := range needsStringMap { + if _, ok := leafNamespaceStringMap[need]; !ok { + return fmt.Errorf("precheck failed, leaf cluster don't have this namespace: %s", need) + } + } + + // judge whether the master cluster already contains the namespace in include + rootNamespaceMap, err := utils.ToMapSetE(rootNamespaceList.Items) + if err != nil { + return fmt.Errorf("rootNamespaceList convert to map failed, err: %s", err) + } + rootNamespaceStringMap := make(map[string]corev1.Namespace) + for _, value := range rootNamespaceMap.(map[interface{}]interface{}) { + namespace := value.(corev1.Namespace) + rootNamespaceStringMap[namespace.Name] = namespace + } + for _, need := range needsStringMap { + if _, ok := rootNamespaceStringMap[need]; ok { + return fmt.Errorf("precheck failed, the same namespace exists for the master cluster and leaf cluster: %s", need) + } + } + return nil +} + +// listCRD retrieves the list of crds from Kubernetes. +func listCRD(dynamicClient dynamic.Interface) ([]*apiextensionsv1.CustomResourceDefinition, error) { + objs, err := dynamicClient.Resource(constants.GVR_CRD).List(context.TODO(), metav1.ListOptions{}) + + if err != nil { + return nil, err + } + + retObj := make([]*apiextensionsv1.CustomResourceDefinition, 0) + + for _, obj := range objs.Items { + tmpObj := &apiextensionsv1.CustomResourceDefinition{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &tmpObj); err != nil { + return nil, err + } + retObj = append(retObj, tmpObj) + } + + return retObj, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/promote_policy_controller.go b/pkg/clustertree/cluster-manager/controllers/promote/promote_policy_controller.go new file mode 100644 index 000000000..2585bd3d6 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/promote_policy_controller.go @@ -0,0 +1,339 @@ +package promote + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kosmos.io/kosmos/cmd/clustertree/cluster-manager/app/options" + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/backup" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/constants" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/detach" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/precheck" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + 
"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/restore" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/collections" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" +) + +const ( + PromotePolicyControllerName = "promote-policy-controller" + RequeueTime = 10 * time.Second +) + +type PromotePolicyController struct { + RootClient client.Client + RootClientSet kubernetes.Interface + RootDynamicClient *dynamic.DynamicClient + RootDiscoveryClient *discovery.DiscoveryClient + GlobalLeafManager leafUtils.LeafResourceManager + PromotePolicyOptions options.PromotePolicyOptions +} + +func (p *PromotePolicyController) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr).Named(PromotePolicyControllerName). + For(&v1alpha1.PromotePolicy{}). + Complete(p) +} + +func (p *PromotePolicyController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + original := &v1alpha1.PromotePolicy{} + if err := p.RootClient.Get(ctx, request.NamespacedName, original); err != nil { + if apierrors.IsNotFound(err) { + klog.Infof("promotepolicy %s not found", original.Name) + return ctrl.Result{}, nil + } + klog.Errorf("error getting promotepolicy %s: %v", original.Name, err) + return ctrl.Result{}, nil + } + + lr, err := p.GlobalLeafManager.GetLeafResourceByNodeName("kosmos-" + original.Spec.ClusterName) + if err != nil { + // wait for leaf resource init + klog.Errorf("Error get kosmos leaf %s resource. %v", original.Spec.ClusterName, err) + return reconcile.Result{RequeueAfter: RequeueTime}, nil + } + + promoteRequest, err := p.preparePromoteRequest(original, lr) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error prepare promoteRequest: %v", err) + } + + switch original.Status.Phase { + case "": + //create promotepolicy request + case v1alpha1.PromotePolicyPhaseCompleted, v1alpha1.PromotePolicyPhaseFailedRollback: + // check if Rollback request + if original.Spec.Rollback == "true" { + klog.Info("rollback start...") + promoteRequest.Spec.Rollback = "" + err = DetachRollback(promoteRequest, original.Status.BackedupFile, true) + if err != nil { + klog.Errorf("rollback detached resources err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedRollback + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + return ctrl.Result{}, nil + } + + err = RestoreRollback(promoteRequest, original.Status.BackedupFile, true) + if err != nil { + klog.Errorf("rollback restored resources err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedRollback + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + return ctrl.Result{}, nil + } + + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseRolledback + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status 
%s", original.Name, promoteRequest.Status.Phase) + } + } + + return ctrl.Result{}, nil + default: + klog.Infof("promotePolicy %s status %s will not handled", original.Name, original.Status.Phase) + return ctrl.Result{}, nil + } + + err = runPrecheck(promoteRequest) + if err != nil { + klog.Errorf("precheck err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedPrecheck + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + return reconcile.Result{}, err + } + + backupFile, err := runBackup(promoteRequest) + if err != nil { + klog.Errorf("backup resources err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedBackup + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + return reconcile.Result{}, err + } + klog.Infof("backup success. file: %s", backupFile) + + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseDetach + promoteRequest.Status.BackedupFile = backupFile + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + + err = runDetach(promoteRequest, backupFile) + if err != nil { + klog.Errorf("detach resources err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedDetach + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + + klog.Warning("Begin rollback detached resources because detach stage failed.") + time.Sleep(5 * time.Second) + err = DetachRollback(promoteRequest, backupFile, false) + if err != nil { + klog.Errorf("rollback detached resource err: %s", err.Error()) + } else { + klog.Info("all detached resource rollback suceess.") + } + return reconcile.Result{}, err + } + + err = runRestore(promoteRequest, backupFile) + if err != nil { + klog.Errorf("restore resources err: %s", err.Error()) + promoteRequest.Status.Phase = v1alpha1.PromotePolicyPhaseFailedRestore + promoteRequest.Status.FailureReason = err.Error() + if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase) + } + + klog.Warning("Begin rollback detached and restored resources because restore stage failed.") + time.Sleep(5 * time.Second) + err = DetachRollback(promoteRequest, backupFile, true) + if err != nil { + klog.Errorf("rollback detached resource err: %s", err.Error()) + } else { + klog.Info("all detached resource rollback suceess.") + } + + err = RestoreRollback(promoteRequest, backupFile, false) + if err != nil { + klog.Errorf("rollback restored resource err: %s", err.Error()) + } else { + klog.Info("all restored resource rollback suceess.") + } + return reconcile.Result{}, err + } + + promoteRequest.Status.Phase = 
v1alpha1.PromotePolicyPhaseCompleted
+	if err = p.updateStatus(original, promoteRequest.PromotePolicy); err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "error updating promotepolicy %s to status %s", original.Name, promoteRequest.Status.Phase)
+	}
+
+	klog.Infof("Create promotePolicy %s completed", original.Name)
+
+	return reconcile.Result{}, nil
+}
+
+func (p *PromotePolicyController) updateStatus(original *v1alpha1.PromotePolicy, updatedObj *v1alpha1.PromotePolicy) error {
+	return p.RootClient.Patch(context.TODO(), updatedObj, client.MergeFrom(original))
+}
+
+func (p *PromotePolicyController) preparePromoteRequest(promote *v1alpha1.PromotePolicy, lf *leafUtils.LeafResource) (*requests.PromoteRequest, error) {
+	// todo validate params
+
+	request := &requests.PromoteRequest{
+		PromotePolicy:             promote.DeepCopy(),
+		RootClientSet:             p.RootClientSet,
+		RootDynamicClient:         p.RootDynamicClient,
+		RootDiscoveryClient:       p.RootDiscoveryClient,
+		LeafClientSet:             lf.Clientset,
+		LeafDynamicClient:         lf.DynamicClient,
+		LeafDiscoveryClient:       lf.DiscoveryClient,
+		NamespaceIncludesExcludes: collections.NewIncludesExcludes().Includes(promote.Spec.IncludedNamespaces...).Excludes(promote.Spec.ExcludedNamespaces...),
+		BackedUpItems:             make(map[types.ItemKey]struct{}),
+		DetachedItems:             make(map[types.ItemKey]struct{}),
+		RestoredItems:             make(map[types.ItemKey]types.RestoredItemStatus),
+		ForbidNamespaces:          p.PromotePolicyOptions.ForbidNamespaces,
+	}
+	return request, nil
+}
+
+func runPrecheck(promoteRequest *requests.PromoteRequest) error {
+	klog.Info("start precheck...")
+	prechecker, err := precheck.NewKubernetesPrecheck(promoteRequest)
+	if err != nil {
+		return errors.Wrap(err, "error new precheck instance")
+	}
+
+	err = prechecker.Precheck()
+	if err != nil {
+		return errors.Wrap(err, "error precheck")
+	}
+
+	return nil
+}
+
+func runBackup(promoteRequest *requests.PromoteRequest) (file string, err error) {
+	klog.Info("start backup resources")
+	filePath := constants.BackupDir + promoteRequest.Name + time.Now().Format("20060102-150405")
+	backupFile, err := os.Create(filePath)
+	if err != nil {
+		return "", errors.Wrap(err, "error creating temp file for backup")
+	}
+	defer backupFile.Close()
+
+	backuper, err := backup.NewKubernetesBackupper(promoteRequest)
+	if err != nil {
+		return "", errors.Wrap(err, "error new backup instance")
+	}
+
+	err = backuper.Backup(backupFile)
+	if err != nil {
+		return "", errors.Wrap(err, "error backup")
+	}
+
+	return filePath, nil
+}
+
+func runDetach(promoteRequest *requests.PromoteRequest, backupfile string) error {
+	// open the backup file produced by the backup stage
+	backupReader, err := os.Open(backupfile)
+	if err != nil {
+		return errors.Wrap(err, "error opening backup file")
+	}
+	defer backupReader.Close()
+
+	detacher, err := detach.NewKubernetesDetacher(promoteRequest, backupReader)
+	if err != nil {
+		return errors.Wrap(err, "error new detach instance")
+	}
+
+	err = detacher.Detach()
+	if err != nil {
+		return errors.Wrap(err, "error detach")
+	}
+
+	return nil
+}
+
+func DetachRollback(promoteRequest *requests.PromoteRequest, backupfile string, detachSuccess bool) error {
+	backupReader, err := os.Open(backupfile)
+	if err != nil {
+		return errors.Wrap(err, "error opening backup file")
+	}
+	defer backupReader.Close()
+
+	detacher, err := detach.NewKubernetesDetacher(promoteRequest, backupReader)
+	if err != nil {
+		return errors.Wrap(err, "error new detach instance")
+	}
+
+	err = detacher.Rollback(detachSuccess)
+	if err != nil {
+		return errors.Wrap(err, "error detach")
+	}
+	return nil
+}
+
+func RestoreRollback(promoteRequest *requests.PromoteRequest,
backupfile string, restoreSuccess bool) error { + backupReader, err := os.Open(backupfile) + if err != nil { + panic(err) + } + defer backupReader.Close() + + restorer, err := restore.NewKubernetesRestorer(promoteRequest, backupReader) + if err != nil { + return errors.Wrap(err, "error new restore instance") + } + err = restorer.Rollback(restoreSuccess) + if err != nil { + return errors.Wrap(err, "error restore") + } + return nil +} + +func runRestore(promoteRequest *requests.PromoteRequest, backupfile string) error { + backupReader, err := os.Open(backupfile) + if err != nil { + panic(err) + } + defer backupReader.Close() + + restorer, err := restore.NewKubernetesRestorer(promoteRequest, backupReader) + if err != nil { + return errors.Wrap(err, "error new restore instance") + } + err = restorer.Restore() + if err != nil { + return errors.Wrap(err, "error restore") + } + + return nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/requests/request.go b/pkg/clustertree/cluster-manager/controllers/promote/requests/request.go new file mode 100644 index 000000000..9e2fbf8b5 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/requests/request.go @@ -0,0 +1,133 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package velero contains the interfaces necessary to implement +// all of the Velero plugins. Users create their own binary containing +// implementations of the plugin kinds in this package. Multiple +// plugins of any type can be implemented. +package requests + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/collections" +) + +type PromoteRequest struct { + *v1alpha1.PromotePolicy + RootClient client.Client + RootClientSet kubernetes.Interface + RootDynamicClient *dynamic.DynamicClient + RootDiscoveryClient *discovery.DiscoveryClient + + LeafClientSet kubernetes.Interface + LeafDynamicClient dynamic.Interface + LeafDiscoveryClient *discovery.DiscoveryClient + + ResourceIncludesExcludes collections.IncludesExcludesInterface + NamespaceIncludesExcludes *collections.IncludesExcludes + BackedUpItems map[types.ItemKey]struct{} + DetachedItems map[types.ItemKey]struct{} + RestoredItems map[types.ItemKey]types.RestoredItemStatus + + ForbidNamespaces []string +} + +// ResourceSelector is a collection of included/excluded namespaces, +// included/excluded resources, and a label-selector that can be used +// to match a set of items from a cluster. +type ResourceSelector struct { + // IncludedNamespaces is a slice of namespace names to match. All + // namespaces in this slice, except those in ExcludedNamespaces, + // will be matched. 
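For reference, this is roughly what a promotion request looks like from the API side once the controller above is running. The sketch builds a `PromotePolicy` in Go using the spec fields the controller actually reads (`ClusterName`, `IncludedNamespaces`, `ExcludedNamespaces`, the namespace-scoped resource filters, `Rollback`); the `PromotePolicySpec` type name, the `main` wrapper and every concrete value are assumptions for illustration only.

```go
// Illustration only: a PromotePolicy that promotes two namespaces from the
// leaf cluster "member1" while keeping Secrets out of scope. Field names match
// the spec fields referenced in this PR; everything else is made up.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
)

func main() {
	policy := &v1alpha1.PromotePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "promote-member1"},
		Spec: v1alpha1.PromotePolicySpec{
			ClusterName:        "member1",
			IncludedNamespaces: []string{"demo", "demo-system"},
			ExcludedNamespaces: []string{"kube-system"},
			// "*" expands to every namespace-scoped resource type discovered in the leaf cluster.
			IncludedNamespaceScopedResources: []string{"*"},
			ExcludedNamespaceScopedResources: []string{"secrets"},
		},
	}
	fmt.Printf("promote %v from cluster %s\n", policy.Spec.IncludedNamespaces, policy.Spec.ClusterName)
}
```

The controller then walks the object through precheck, backup, detach and restore, recording the backup file and phase in the status; setting `Rollback: "true"` on a completed policy triggers the rollback path shown earlier.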
A nil/empty slice matches all namespaces. + IncludedNamespaces []string + // ExcludedNamespaces is a slice of namespace names to exclude. + // All namespaces in IncludedNamespaces, *except* those in + // this slice, will be matched. + ExcludedNamespaces []string + // IncludedResources is a slice of resources to match. Resources may be specified + // as full names (e.g. "services"), abbreviations (e.g. "svc"), or with the + // groups they are in (e.g. "ingresses.extensions"). All resources in this slice, + // except those in ExcludedResources, will be matched. A nil/empty slice matches + // all resources. + IncludedResources []string + // ExcludedResources is a slice of resources to exclude. Resources may be specified + // as full names (e.g. "services"), abbreviations (e.g. "svc"), or with the + // groups they are in (e.g. "ingresses.extensions"). All resources in IncludedResources, + // *except* those in this slice, will be matched. + ExcludedResources []string + // LabelSelector is a string representation of a selector to apply + // when matching resources. See "k8s.io/apimachinery/pkg/labels".Parse() + // for details on syntax. + LabelSelector string +} + +// Applicable allows actions and plugins to specify which resources they should be invoked for +type Applicable interface { + // AppliesTo returns information about which resources this Responder should be invoked for. + AppliesTo() (ResourceSelector, error) +} + +// ResourceIdentifier describes a single item by its group, resource, namespace, and name. +type ResourceIdentifier struct { + schema.GroupResource + Namespace string + Name string +} + +func (in *ResourceIdentifier) DeepCopy() *ResourceIdentifier { + if in == nil { + return nil + } + out := new(ResourceIdentifier) + in.DeepCopyInto(out) + return out +} + +func (in *ResourceIdentifier) DeepCopyInto(out *ResourceIdentifier) { + *out = *in + out.GroupResource = in.GroupResource +} + +// OperationProgress describes progress of an asynchronous plugin operation. +type OperationProgress struct { + // True when the operation has completed, either successfully or with a failure + Completed bool + // Set when the operation has failed + Err string + // Quantity completed so far and the total quantity associated with the operation + // in OperationUnits. For data mover and volume snapshotter use cases, this will + // usually be in bytes. On successful completion, NCompleted and NTotal should be + // the same + NCompleted, NTotal int64 + // Units represented by NCompleted and NTotal -- for data mover and item + // snapshotters, this will usually be bytes. + OperationUnits string + // Optional description of operation progress (i.e. "Current phase: Running") + Description string + // When the operation was started and when the last update was seen. Not all + // systems retain when the upload was begun, return Time 0 (time.Unix(0, 0)) + // if unknown. 
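The `NamespaceIncludesExcludes` filter carried by the request is built once in `preparePromoteRequest` and read back during precheck. A small hedged sketch of that round trip; only `NewIncludesExcludes`, `Includes`, `Excludes`, `GetIncludes` and `GetExcludes` are taken from this PR, the wrapper and values are illustrative.

```go
// Sketch of the namespace filter used by the promote request.
package main

import (
	"fmt"

	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/collections"
)

func main() {
	nsFilter := collections.NewIncludesExcludes().
		Includes("demo", "demo-system").
		Excludes("kube-system")

	// The precheck expands "*" in GetIncludes() against the leaf cluster's
	// namespace list and rejects excludes that were never included.
	fmt.Println("includes:", nsFilter.GetIncludes())
	fmt.Println("excludes:", nsFilter.GetExcludes())
}
```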
+ Started, Updated time.Time +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/pod_restore_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/pod_restore_action.go new file mode 100644 index 000000000..cb9b3b6dd --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/pod_restore_action.go @@ -0,0 +1,87 @@ +package restore + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type PodAction struct { +} + +func NewPodAction() *PodAction { + return &PodAction{} +} + +func (p *PodAction) Resource() []string { + return []string{"pods"} +} + +func (p *PodAction) Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedPod := new(corev1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, updatedPod); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to pod") + } + + updatedPod.Spec.NodeName = restorer.kosmosNodeName + + kosmosNodeToleration := corev1.Toleration{ + Key: "kosmos.io/node", + Value: "true", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + } + tolerations := updatedPod.Spec.Tolerations + if tolerations == nil { + tolerations = make([]corev1.Toleration, 1) + tolerations[0] = kosmosNodeToleration + } else { + kosmosTolerationExist := false + for _, toleration := range tolerations { + if toleration.Key == "kosmos.io/node" { + kosmosTolerationExist = true + break + } + } + if !kosmosTolerationExist { + updatedPod.Spec.Tolerations = append(updatedPod.Spec.Tolerations, kosmosNodeToleration) + } + } + + labels := updatedPod.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels["kosmos-io/pod"] = "true" + labels["kosmos-io/synced"] = "true" + updatedPod.SetLabels(labels) + + podMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPod) + if err != nil { + return nil, errors.Wrap(err, "unable to convert pod to unstructured item") + } + return &unstructured.Unstructured{Object: podMap}, nil +} + +func (p *PodAction) Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedPod := new(corev1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(fromCluster.Object, updatedPod); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to pod") + } + + labels := updatedPod.GetLabels() + if labels != nil { + if _, ok := labels["kosmos-io/pod"]; ok { + delete(labels, "kosmos-io/pod") + updatedPod.SetLabels(labels) + podMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPod) + if err != nil { + return nil, errors.Wrap(err, "unable to convert pod to unstructured item") + } + return &unstructured.Unstructured{Object: podMap}, nil + } + } + + return fromCluster, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/priority.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/priority.go new file mode 100644 index 000000000..5897c3af7 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/priority.go @@ -0,0 +1,92 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
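`PodAction.Execute` pins a promoted pod to the leaf's `kosmos-<cluster>` node, adds the `kosmos.io/node:NoSchedule` toleration and labels it `kosmos-io/pod` / `kosmos-io/synced` so the root cluster treats it as a synced pod. A standalone sketch of the same mutation follows; the `addKosmosScheduling` helper and the example pod are hypothetical, while the node-name prefix, toleration and label values mirror the action above.

```go
// Stand-alone illustration of the mutation PodAction.Execute applies before a
// promoted pod is recreated in the root cluster.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func addKosmosScheduling(pod *corev1.Pod, clusterName string) {
	pod.Spec.NodeName = "kosmos-" + clusterName

	toleration := corev1.Toleration{
		Key:      "kosmos.io/node",
		Value:    "true",
		Operator: corev1.TolerationOpEqual,
		Effect:   corev1.TaintEffectNoSchedule,
	}
	// Append the toleration only if it is not already present.
	found := false
	for _, t := range pod.Spec.Tolerations {
		if t.Key == toleration.Key {
			found = true
			break
		}
	}
	if !found {
		pod.Spec.Tolerations = append(pod.Spec.Tolerations, toleration)
	}

	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	pod.Labels["kosmos-io/pod"] = "true"
	pod.Labels["kosmos-io/synced"] = "true"
}

func main() {
	pod := &corev1.Pod{}
	addKosmosScheduling(pod, "member1")
	fmt.Println(pod.Spec.NodeName, pod.Spec.Tolerations, pod.Labels)
}
```

Note that, unlike the hunk above, this sketch also appends the toleration when the pod has no tolerations at all; in `Execute` the nil branch builds a local slice that is never assigned back to `updatedPod.Spec.Tolerations`, which looks worth double-checking.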
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "fmt" + "strings" +) + +const ( + prioritySeparator = "-" +) + +// Priorities defines the desired order of resource operations: +// Resources in the HighPriorities list will be handled first +// Resources in the LowPriorities list will be handled last +// Other resources will be handled alphabetically after the high prioritized resources and before the low prioritized resources +type Priorities struct { + HighPriorities []string + LowPriorities []string +} + +// String returns a string representation of Priority. +func (p *Priorities) String() string { + priorities := p.HighPriorities + if len(p.LowPriorities) > 0 { + priorities = append(priorities, prioritySeparator) + priorities = append(priorities, p.LowPriorities...) + } + return strings.Join(priorities, ",") +} + +// Set parses the provided string to the priority object +func (p *Priorities) Set(s string) error { + if len(s) == 0 { + return nil + } + strs := strings.Split(s, ",") + separatorIndex := -1 + for i, str := range strs { + if str == prioritySeparator { + if separatorIndex > -1 { + return fmt.Errorf("multiple priority separator %q found", prioritySeparator) + } + separatorIndex = i + } + } + // has no separator + if separatorIndex == -1 { + p.HighPriorities = strs + return nil + } + // start with separator + if separatorIndex == 0 { + // contain only separator + if len(strs) == 1 { + return nil + } + p.LowPriorities = strs[1:] + return nil + } + // end with separator + if separatorIndex == len(strs)-1 { + p.HighPriorities = strs[:len(strs)-1] + return nil + } + + // separator in the middle + p.HighPriorities = strs[:separatorIndex] + p.LowPriorities = strs[separatorIndex+1:] + + return nil +} + +// Type specifies the flag type +func (p *Priorities) Type() string { + return "stringArray" +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/pv_restore_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/pv_restore_action.go new file mode 100644 index 000000000..adc5c231a --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/pv_restore_action.go @@ -0,0 +1,81 @@ +package restore + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" +) + +type PvAction struct { +} + +func NewPvAction() *PvAction { + return &PvAction{} +} + +func (p *PvAction) Resource() []string { + return []string{"persistentvolumes"} +} + +func (p *PvAction) Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedPv := new(corev1.PersistentVolume) + + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), updatedPv); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to pv") + } + + claimRef := updatedPv.Spec.ClaimRef + if claimRef != nil { + gvr, resource, err := 
restorer.discoveryHelper.ResourceFor(kuberesource.PersistentVolumeClaims.WithVersion("")) + if err != nil { + return nil, errors.Errorf("Error getting resolved resource for %s", kuberesource.PersistentVolumeClaims) + } + + client, err := restorer.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, claimRef.Namespace) + if err != nil { + return nil, err + } + + pvcObj, err := client.Get(claimRef.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Errorf("Error get pvc %s, %v", claimRef.Name, err) + } + + claimRef.ResourceVersion = pvcObj.GetResourceVersion() + claimRef.UID = pvcObj.GetUID() + + pvMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPv) + if err != nil { + return nil, errors.Wrap(err, "unable to convert pod to unstructured item") + } + return &unstructured.Unstructured{Object: pvMap}, nil + } else { + return obj, nil + } +} + +func (p *PvAction) Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedPv := new(corev1.PersistentVolume) + + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(fromCluster.UnstructuredContent(), updatedPv); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to pv") + } + + annotations := updatedPv.Annotations + if annotations != nil { + if _, ok := annotations["kosmos-io/cluster-owners"]; ok { + delete(annotations, "kosmos-io/cluster-owners") + pvMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedPv) + if err != nil { + return nil, errors.Wrap(err, "unable to convert pod to unstructured item") + } + return &unstructured.Unstructured{Object: pvMap}, nil + } + } + + return fromCluster, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/register_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/register_action.go new file mode 100644 index 000000000..5a04125cb --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/register_action.go @@ -0,0 +1,88 @@ +package restore + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" +) + +type RestoreItemAction interface { + // return resource.group + Resource() []string + + // Execute allows the ItemAction to perform arbitrary logic with the item being backed up, + // including mutating the item itself prior to backup. The item (unmodified or modified) + // should be returned, along with an optional slice of ResourceIdentifiers specifying + // additional related items that should be backed up. 
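`Priorities.Set` accepts a single comma-separated value in which a lone `-` entry separates the high-priority resources from the low-priority ones. A short example of the accepted syntax; the `ExamplePriorities` wrapper is illustrative, the parsing behaviour is the one implemented in `priority.go` above.

```go
// Demonstrates the "high,-,low" syntax parsed by Priorities.Set. Assumed to
// compile in the same restore package as the Priorities type.
package restore

import "fmt"

func ExamplePriorities() {
	var p Priorities
	// Everything before "-" is restored first, everything after it last,
	// and all remaining resources fall in between (alphabetically).
	_ = p.Set("customresourcedefinitions,namespaces,persistentvolumes,-,services")

	fmt.Println(p.HighPriorities) // [customresourcedefinitions namespaces persistentvolumes]
	fmt.Println(p.LowPriorities)  // [services]
	fmt.Println(p.String())       // customresourcedefinitions,namespaces,persistentvolumes,-,services
}
```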
+ Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) + + Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) +} + +func registerRestoreActions() (map[string]RestoreItemAction, error) { + actionMap := make(map[string]RestoreItemAction, 3) + + err := registerRestoreItemAction(actionMap, newPodRestoreItemAction) + if err != nil { + return nil, errors.WithMessage(err, "register pod restore action error") + } + + err = registerRestoreItemAction(actionMap, newPvRestoreItemAction) + if err != nil { + return nil, errors.WithMessage(err, "register pv restore action error") + } + + err = registerRestoreItemAction(actionMap, newStsDeployRestoreItemAction) + if err != nil { + return nil, errors.WithMessage(err, "register sts/deploy restore action error") + } + + err = registerRestoreItemAction(actionMap, newServiceRestoreItemAction) + if err != nil { + return nil, errors.WithMessage(err, "register service restore action error") + } + + err = registerRestoreItemAction(actionMap, newUniversalRestoreItemAction) + if err != nil { + return nil, errors.WithMessage(err, "register universal restore action error") + } + + return actionMap, nil +} + +func registerRestoreItemAction(actionsMap map[string]RestoreItemAction, initializer types.HandlerInitializer) error { + instance, err := initializer() + if err != nil { + return errors.WithMessage(err, "init restore action instance error") + } + + itemAction, ok := instance.(RestoreItemAction) + if !ok { + return errors.Errorf("%T is not a backup item action", instance) + } + for _, resource := range itemAction.Resource() { + actionsMap[resource] = itemAction + } + return nil +} + +func newPodRestoreItemAction() (interface{}, error) { + return NewPodAction(), nil +} + +func newPvRestoreItemAction() (interface{}, error) { + return NewPvAction(), nil +} + +func newStsDeployRestoreItemAction() (interface{}, error) { + return NewStsDeployAction(), nil +} + +func newServiceRestoreItemAction() (interface{}, error) { + return NewServiceAction(), nil +} + +func newUniversalRestoreItemAction() (interface{}, error) { + return NewUniversalAction(), nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/restore.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/restore.go new file mode 100644 index 000000000..25cf00c57 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/restore.go @@ -0,0 +1,790 @@ +package restore + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/sets" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/client" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/constants" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/requests" + 
"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/types" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/archive" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/kube" +) + +/* +High priorities: + - Custom Resource Definitions come before Custom Resource so that they can be + restored with their corresponding CRD. + - Namespaces go second because all namespaced resources depend on them. + - Storage Classes are needed to create PVs and PVCs correctly. + - VolumeSnapshotClasses are needed to provision volumes using volumesnapshots + - VolumeSnapshotContents are needed as they contain the handle to the volume snapshot in the + storage provider + - VolumeSnapshots are needed to create PVCs using the VolumeSnapshot as their data source. + - DataUploads need to restore before PVC for Snapshot DataMover to work, because PVC needs the DataUploadResults to create DataDownloads. + - PVs go before PVCs because PVCs depend on them. + - PVCs go before pods or controllers so they can be mounted as volumes. + - Service accounts go before secrets so service account token secrets can be filled automatically. + - Secrets and ConfigMaps go before pods or controllers so they can be mounted + as volumes. + - Limit ranges go before pods or controllers so pods can use them. + - Pods go before controllers so they can be explicitly restored and potentially + have pod volume restores run before controllers adopt the pods. + - Replica sets go before deployments/other controllers so they can be explicitly + restored and be adopted by controllers. + - Services go before Clusters so they can be adopted by AKO-operator and no new Services will be created + for the same clusters +*/ +var defaultRestorePriorities = Priorities{ + HighPriorities: []string{ + "customresourcedefinitions", + "namespaces", + "persistentvolumeclaims", + "persistentvolumes", + "serviceaccounts", + "roles.rbac.authorization.k8s.io", + "rolebindings.rbac.authorization.k8s.io", + "secrets", + "configmaps", + "pods", + "replicasets.apps", + "deployments.apps", + "statefulsets.apps", + "services", + }, + LowPriorities: []string{}, +} + +// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster. +type kubernetesRestorer struct { + request *requests.PromoteRequest + discoveryHelper discovery.Helper + dynamicFactory client.DynamicFactory + fileSystem filesystem.Interface + restoreDir string + actions map[string]RestoreItemAction + namespaceClient corev1.NamespaceInterface + resourceClients map[resourceClientKey]client.Dynamic + resourceTerminatingTimeout time.Duration + backupReader io.Reader + kosmosClusterName string + kosmosNodeName string +} + +// restoreableResource represents map of individual items of each resource +// identifier grouped by their original namespaces. +type restoreableResource struct { + resource string + selectedItemsByNamespace map[string][]restoreableItem + totalItems int +} + +// restoreableItem represents an item by its target namespace contains enough +// information required to restore the item. 
+type restoreableItem struct { + path string + targetNamespace string + name string + version string // used for initializing informer cache +} + +type resourceClientKey struct { + resource schema.GroupVersionResource + namespace string +} + +func NewKubernetesRestorer(request *requests.PromoteRequest, backupReader io.Reader) (*kubernetesRestorer, error) { + dynamicFactory := client.NewDynamicFactory(request.RootDynamicClient) + discoveryHelper, err := discovery.NewHelper(request.RootDiscoveryClient) + if err != nil { + return nil, err + } + + actions, err := registerRestoreActions() + if err != nil { + return nil, err + } + return &kubernetesRestorer{ + request: request, + discoveryHelper: discoveryHelper, + dynamicFactory: dynamicFactory, + namespaceClient: request.RootClientSet.CoreV1().Namespaces(), + resourceTerminatingTimeout: 10 * time.Minute, + fileSystem: filesystem.NewFileSystem(), + backupReader: backupReader, + kosmosClusterName: request.Spec.ClusterName, + kosmosNodeName: "kosmos-" + request.Spec.ClusterName, + resourceClients: make(map[resourceClientKey]client.Dynamic), + actions: actions, + }, nil +} + +func (kr *kubernetesRestorer) Restore() error { + klog.Infof("Starting restore of backup") + + defer func() { + // todo rollback if needed? + }() + + dir, err := archive.NewExtractor(kr.fileSystem).UnzipAndExtractBackup(kr.backupReader) + if err != nil { + return errors.Errorf("error unzipping and extracting: %v", err) + } + defer func() { + if err := kr.fileSystem.RemoveAll(dir); err != nil { + klog.Errorf("error removing temporary directory %s: %s", dir, err.Error()) + } + }() + + // Need to set this for additionalItems to be restored. + kr.restoreDir = dir + + backupResources, err := archive.NewParser(kr.fileSystem).Parse(kr.restoreDir) + // If ErrNotExist occurs, it implies that the backup to be restored includes zero items. + // Need to add a warning about it and jump out of the function. + if errors.Cause(err) == archive.ErrNotExist { + return errors.Wrap(err, "zero items to be restored") + } + if err != nil { + return errors.Wrap(err, "error parsing backup contents") + } + + klog.Infof("total backup resources size: %v", len(backupResources)) + + // totalItems: previously discovered items, i: iteration counter. 
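`Restore` and `Rollback` lean on the generic `sets` package to remember which group-resources were already processed and which namespaces already exist. A tiny standalone illustration of the `sets.KeySet` / `Has` / `Insert` calls used here (the values are made up):

```go
// Minimal illustration of the apimachinery generic sets API used for
// processed-resource and existing-namespace tracking.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	processed := sets.KeySet(make(map[string]string)) // empty sets.Set[string]

	if !processed.Has("deployments.apps") {
		processed.Insert("deployments.apps")
	}

	fmt.Println(processed.Has("deployments.apps")) // true
	fmt.Println(processed.Has("services"))         // false
}
```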
+ processedItems, existingNamespaces := 0, sets.KeySet(make(map[string]struct{})) + + klog.Infof("Restore everything order by defaultRestorePriorities") + // Restore everything else + selectedResourceCollection, _, err := kr.getOrderedResourceCollection( + backupResources, + make([]restoreableResource, 0), + sets.KeySet(make(map[string]string)), + defaultRestorePriorities, + true, + ) + if err != nil { + return errors.Wrap(err, "getOrderedResourceCollection err") + } + + klog.Infof("resource collection size: %s", len(selectedResourceCollection)) + + for _, selectedResource := range selectedResourceCollection { + // Restore this resource + processedItems, err = kr.processSelectedResource( + selectedResource, + processedItems, + existingNamespaces, + ) + if err != nil { + return errors.Wrap(err, "processSelectedResource err") + } + } + + return nil +} + +func (kr *kubernetesRestorer) Rollback(allRestored bool) error { + dir, err := archive.NewExtractor(kr.fileSystem).UnzipAndExtractBackup(kr.backupReader) + if err != nil { + return errors.Errorf("error unzipping and extracting: %v", err) + } + defer func() { + if err := kr.fileSystem.RemoveAll(dir); err != nil { + klog.Errorf("error removing temporary directory %s: %s", dir, err.Error()) + } + }() + + // Need to set this for additionalItems to be restored. + kr.restoreDir = dir + + backupResources, err := archive.NewParser(kr.fileSystem).Parse(kr.restoreDir) + // If ErrNotExist occurs, it implies that the backup to be restored includes zero items. + // Need to add a warning about it and jump out of the function. + if errors.Cause(err) == archive.ErrNotExist { + return errors.Wrap(err, "zero items to be restored") + } + if err != nil { + return errors.Wrap(err, "error parsing backup contents") + } + + klog.Infof("total backup resources size: %v", len(backupResources)) + + var highProprites []string + highProprites = append(highProprites, defaultRestorePriorities.HighPriorities...) + reversSlice(highProprites) + unestorePriorities := Priorities{ + HighPriorities: highProprites, + LowPriorities: defaultRestorePriorities.LowPriorities, + } + + selectedResourceCollection, _, err := kr.getOrderedResourceCollection( + backupResources, + make([]restoreableResource, 0), + sets.KeySet(make(map[string]string)), + unestorePriorities, + true, + ) + if err != nil { + return errors.Wrap(err, "getOrderedResourceCollection err") + } + + for _, selectedResource := range selectedResourceCollection { + // Restore this resource + err = kr.deleteSelectedResource(selectedResource, allRestored) + if err != nil { + return errors.Wrap(err, "deleteSelectedResource err") + } + } + + return nil +} + +// getOrderedResourceCollection iterates over list of ordered resource +// identifiers, applies resource include/exclude criteria, and Kubernetes +// selectors to make a list of resources to be actually restored preserving the +// original order. 
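`Rollback` reverses the high-priority list so objects are removed in roughly the opposite order they were created, then reuses the same ordered-collection walk. `reversSlice` is referenced but not part of this hunk; a minimal in-place reversal like the following is what the call site appears to assume:

```go
// Assumed shape of the reversSlice helper called by Rollback: reverse the
// string slice in place.
package restore

func reversSlice(s []string) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
```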
+func (kr *kubernetesRestorer) getOrderedResourceCollection( + backupResources map[string]*archive.ResourceItems, + restoreResourceCollection []restoreableResource, + processedResources sets.Set[string], + resourcePriorities Priorities, + includeAllResources bool, +) ([]restoreableResource, sets.Set[string], error) { + var resourceList []string + if includeAllResources { + resourceList = getOrderedResources(resourcePriorities, backupResources) + } else { + resourceList = resourcePriorities.HighPriorities + } + + for _, resource := range resourceList { + // try to resolve the resource via discovery to a complete group/version/resource + gvr, _, err := kr.discoveryHelper.ResourceFor(schema.ParseGroupResource(resource).WithVersion("")) + if err != nil { + klog.Infof("Skipping restore of resource %s because it cannot be resolved via discovery", resource) + continue + } + groupResource := gvr.GroupResource() + + // Check if we've already restored this resource (this would happen if + // the resource we're currently looking at was already restored because + // it was a prioritized resource, and now we're looking at it as part of + // the backup contents). + if processedResources.Has(groupResource.String()) { + klog.Infof("Skipping restore of resource %s because it's already been processed", groupResource.String()) + continue + } + + // Check if the resource should be restored according to the resource + // includes/excludes. + + // Check if the resource is present in the backup + resourceList := backupResources[groupResource.String()] + if resourceList == nil { + klog.Infof("Skipping restore of resource %s because it's not present in the backup tarball", groupResource.String()) + continue + } + + // Iterate through each namespace that contains instances of the + // resource and append to the list of to-be restored resources. + for namespace, items := range resourceList.ItemsByNamespace { + res, err := kr.getSelectedRestoreableItems(groupResource.String(), namespace, items) + if err != nil { + return nil, nil, errors.Wrap(err, "getSelectedRestoreableItems err") + } + + restoreResourceCollection = append(restoreResourceCollection, res) + } + + // record that we've restored the resource + processedResources.Insert(groupResource.String()) + } + return restoreResourceCollection, processedResources, nil +} + +// Process and restore one restoreableResource from the backup and update restore progress +// metadata. At this point, the resource has already been validated and counted for inclusion +// in the expected total restore count. +func (kr *kubernetesRestorer) processSelectedResource( + selectedResource restoreableResource, + processedItems int, + existingNamespaces sets.Set[string], +) (int, error) { + groupResource := schema.ParseGroupResource(selectedResource.resource) + + for namespace, selectedItems := range selectedResource.selectedItemsByNamespace { + for _, selectedItem := range selectedItems { + if groupResource == kuberesource.Namespaces { + namespace = selectedItem.name + } + + // If we don't know whether this namespace exists yet, attempt to create + // it in order to ensure it exists. Try to get it from the backup tarball + // (in order to get any backed-up metadata), but if we don't find it there, + // create a blank one. 
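The ordered-collection walk above keys everything by the `resource.group` strings produced for the backup and resolves them back to a versioned GVR through the discovery helper. The split itself is plain `schema.ParseGroupResource`, shown standalone here with made-up values:

```go
// Shows how "resource.group" keys map onto schema.GroupResource before the
// discovery helper fills in the preferred version via WithVersion("").
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gr := schema.ParseGroupResource("roles.rbac.authorization.k8s.io")
	fmt.Println(gr.Group)    // rbac.authorization.k8s.io
	fmt.Println(gr.Resource) // roles

	// Core resources carry no group suffix.
	fmt.Println(schema.ParseGroupResource("namespaces")) // namespaces
}
```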
+ if namespace != "" && !existingNamespaces.Has(selectedItem.targetNamespace) { + ns := getNamespace( + archive.GetItemFilePath(kr.restoreDir, "namespaces", "", namespace), + selectedItem.targetNamespace, + ) + _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady( + ns, + kr.namespaceClient, + kr.resourceTerminatingTimeout, + ) + if err != nil { + return processedItems, err + } + + // Add the newly created namespace to the list of restored items. + if nsCreated { + itemKey := types.ItemKey{ + Resource: groupResource.String(), + Namespace: ns.Namespace, + Name: ns.Name, + } + kr.request.RestoredItems[itemKey] = types.RestoredItemStatus{Action: constants.ItemRestoreResultCreated, ItemExists: true} + } + + // Keep track of namespaces that we know exist so we don't + // have to try to create them multiple times. + existingNamespaces.Insert(selectedItem.targetNamespace) + } + + // For namespaces resources we don't need to following steps + if groupResource == kuberesource.Namespaces { + continue + } + + obj, err := archive.Unmarshal(kr.fileSystem, selectedItem.path) + if err != nil { + if err != nil { + return processedItems, errors.Errorf("error decoding %q: %v", strings.Replace(selectedItem.path, kr.restoreDir+"/", "", -1), err) + } + } + + _, err = kr.restoreItem(obj, groupResource, selectedItem.targetNamespace) + if err != nil { + return processedItems, errors.Wrap(err, "restoreItem error") + } + processedItems++ + } + } + + return processedItems, nil +} + +func (kr *kubernetesRestorer) deleteSelectedResource(selectedResource restoreableResource, allRestored bool) error { + groupResource := schema.ParseGroupResource(selectedResource.resource) + + for _, selectedItems := range selectedResource.selectedItemsByNamespace { + for _, selectedItem := range selectedItems { + obj, err := archive.Unmarshal(kr.fileSystem, selectedItem.path) + if err != nil { + if err != nil { + return errors.Errorf("error decoding %q: %v", strings.Replace(selectedItem.path, kr.restoreDir+"/", "", -1), err) + } + } + + if !allRestored { + item := types.ItemKey{ + Resource: groupResource.String(), + Name: selectedItem.name, + Namespace: selectedItem.targetNamespace, + } + + if _, ok := kr.request.RestoredItems[item]; !ok { + // unrestored resource, doesn't need to handle + continue + } + } + + _, err = kr.deleteItem(obj, groupResource, selectedItem.targetNamespace) + if err != nil { + return errors.Wrap(err, "deleteItem error") + } + } + } + + return nil +} + +// getSelectedRestoreableItems applies Kubernetes selectors on individual items +// of each resource type to create a list of items which will be actually +// restored. 
+func (kr *kubernetesRestorer) getSelectedRestoreableItems(resource string, namespace string, items []string) (restoreableResource, error) { + restorable := restoreableResource{ + resource: resource, + selectedItemsByNamespace: make(map[string][]restoreableItem), + } + + targetNamespace := namespace + if targetNamespace != "" { + klog.Infof("Resource '%s' will be restored into namespace '%s'", resource, targetNamespace) + } else { + klog.Infof("Resource '%s' will be restored at cluster scope", resource) + } + + resourceForPath := resource + + for _, item := range items { + itemPath := archive.GetItemFilePath(kr.restoreDir, resourceForPath, namespace, item) + + obj, err := archive.Unmarshal(kr.fileSystem, itemPath) + if err != nil { + return restorable, errors.Errorf("error decoding %q: %v", strings.Replace(itemPath, kr.restoreDir+"/", "", -1), err) + } + + if resource == kuberesource.Namespaces.String() { + // handle remapping for namespace resource + targetNamespace = item + } + + selectedItem := restoreableItem{ + path: itemPath, + name: item, + targetNamespace: targetNamespace, + version: obj.GroupVersionKind().Version, + } + restorable.selectedItemsByNamespace[namespace] = + append(restorable.selectedItemsByNamespace[namespace], selectedItem) + restorable.totalItems++ + } + return restorable, nil +} + +func (kr *kubernetesRestorer) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (bool, error) { + // itemExists bool is used to determine whether to include this item in the "wait for additional items" list + itemExists := false + resourceID := getResourceID(groupResource, namespace, obj.GetName()) + + if namespace != "" { + nsToEnsure := getNamespace(archive.GetItemFilePath(kr.restoreDir, "namespaces", "", obj.GetNamespace()), namespace) + _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, kr.namespaceClient, kr.resourceTerminatingTimeout) + if err != nil { + return itemExists, err + } + // Add the newly created namespace to the list of restored items. + if nsCreated { + itemKey := types.ItemKey{ + Resource: groupResource.String(), + Namespace: nsToEnsure.Namespace, + Name: nsToEnsure.Name, + } + kr.request.RestoredItems[itemKey] = types.RestoredItemStatus{Action: constants.ItemRestoreResultCreated, ItemExists: true} + } + } + + complete, err := isCompleted(obj, groupResource) + if err != nil { + return itemExists, errors.Errorf("error checking completion of %q: %v", resourceID, err) + } + if complete { + klog.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) + return itemExists, nil + } + + name := obj.GetName() + + // Check if we've already restored this itemKey. 
+ itemKey := types.ItemKey{ + Resource: groupResource.String(), + Namespace: namespace, + Name: name, + } + + if prevRestoredItemStatus, exists := kr.request.RestoredItems[itemKey]; exists { + klog.Infof("Skipping %s because it's already been restored.", resourceID) + itemExists = prevRestoredItemStatus.ItemExists + return itemExists, nil + } + kr.request.RestoredItems[itemKey] = types.RestoredItemStatus{ItemExists: itemExists} + defer func() { + itemStatus := kr.request.RestoredItems[itemKey] + // the action field is set explicitly + if len(itemStatus.Action) > 0 { + return + } + // others are all failed + itemStatus.Action = constants.ItemRestoreResultFailed + kr.request.RestoredItems[itemKey] = itemStatus + }() + + if action, ok := kr.actions[groupResource.String()]; ok { + obj, err = action.Execute(obj, kr) + if err != nil { + return itemExists, errors.Errorf("error execute %s action: %v", groupResource.String(), err) + } + } + + //objStatus, statusFieldExists, statusFieldErr := unstructured.NestedFieldCopy(obj.Object, "status") + // Clear out non-core metadata fields and status. + if obj, err = kube.ResetMetadataAndStatus(obj); err != nil { + return itemExists, err + } + + // The object apiVersion might get modified by a RestorePlugin so we need to + // get a new client to reflect updated resource path. + newGR := schema.GroupResource{Group: obj.GroupVersionKind().Group, Resource: groupResource.Resource} + resourceClient, err := kr.getResourceClient(newGR, obj, obj.GetNamespace()) + if err != nil { + return itemExists, errors.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &groupResource, err) + } + + klog.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name) + + var _ *unstructured.Unstructured + var restoreErr error + + klog.Infof("Creating %s: %v", obj.GroupVersionKind().Kind, name) + _, restoreErr = resourceClient.Create(obj) + if restoreErr == nil { + itemExists = true + kr.request.RestoredItems[itemKey] = types.RestoredItemStatus{Action: constants.ItemRestoreResultCreated, ItemExists: itemExists} + } + + // Error was something other than an AlreadyExists. + if restoreErr != nil { + if apierrors.IsAlreadyExists(restoreErr) { + klog.Warningf("%s already exists", resourceID) + return itemExists, nil + } + return itemExists, errors.Errorf("error restoring %s: %v", resourceID, restoreErr) + } + + return itemExists, nil +} + +func (kr *kubernetesRestorer) deleteItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (bool, error) { + // Check if we've already restored this itemKey. + itemKey := types.ItemKey{ + Resource: groupResource.String(), + Namespace: namespace, + Name: obj.GetName(), + } + + // The object apiVersion might get modified by a RestorePlugin so we need to + // get a new client to reflect updated resource path. + resourceClient, err := kr.getResourceClient(groupResource, obj, obj.GetNamespace()) + if err != nil { + return false, errors.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &groupResource, err) + } + + if action, ok := kr.actions[groupResource.String()]; ok { + klog.Infof("Attempting to revert %s: %v", obj.GroupVersionKind().Kind, obj.GetName()) + fromCluster, err := resourceClient.Get(obj.GetName(), metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Warningf("resource %s not found. 
skip unstore", obj.GroupVersionKind().String(), obj.GetName()) + return true, nil + } else { + return false, errors.Wrapf(err, "get resource %s %s failed.", obj.GroupVersionKind().String(), obj.GetName()) + } + } + + updatedObj, err := action.Revert(fromCluster, kr) + if err != nil { + return false, errors.Errorf("error revert %s action: %v", groupResource.String(), err) + } + + patchBytes, err := kube.GeneratePatch(fromCluster, updatedObj) + if err != nil { + return false, errors.Wrap(err, "error generating patch") + } + if patchBytes == nil { + klog.Infof("the same obj %s. skipped patch", obj.GetName()) + } else { + _, err = resourceClient.Patch(obj.GetName(), patchBytes) + if err != nil { + return false, errors.Wrapf(err, "patch %s error", obj.GetName()) + } + } + } + + klog.Infof("Deleting %s: %v", obj.GroupVersionKind().Kind, obj.GetName()) + deleteOptions := metav1.DeleteOptions{} + if groupResource == kuberesource.Pods { + graceDeleteSecond := int64(0) + deleteOptions = metav1.DeleteOptions{GracePeriodSeconds: &graceDeleteSecond} + } + err = resourceClient.Delete(obj.GetName(), deleteOptions) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Warningf("delete %s %s error because resource not found.", obj.GroupVersionKind().String(), obj.GetName()) + } else { + klog.Errorf("error delete delete %s %s. %s", obj.GroupVersionKind().String(), obj.GetName(), err.Error()) + } + } + delete(kr.request.RestoredItems, itemKey) + return true, nil +} + +func (kr *kubernetesRestorer) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) { + key := getResourceClientKey(groupResource, obj.GroupVersionKind().Version, namespace) + + if client, ok := kr.resourceClients[key]; ok { + return client, nil + } + + // Initialize client for this resource. We need metadata from an object to + // do this. + klog.Infof("Getting client for %v", obj.GroupVersionKind()) + + resource := metav1.APIResource{ + Namespaced: len(namespace) > 0, + Name: groupResource.Resource, + } + + client, err := kr.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) + if err != nil { + return nil, err + } + + kr.resourceClients[key] = client + return client, nil +} +func getResourceClientKey(groupResource schema.GroupResource, version, namespace string) resourceClientKey { + return resourceClientKey{ + resource: groupResource.WithVersion(version), + namespace: namespace, + } +} + +// isCompleted returns whether or not an object is considered completed. Used to +// identify whether or not an object should be restored. Only Jobs or Pods are +// considered. +func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) { + switch groupResource { + case kuberesource.Pods: + phase, _, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "phase") + if err != nil { + return false, errors.WithStack(err) + } + if phase == string(v1.PodFailed) || phase == string(v1.PodSucceeded) { + return true, nil + } + + case kuberesource.Jobs: + ct, found, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "completionTime") + if err != nil { + return false, errors.WithStack(err) + } + if found && ct != "" { + return true, nil + } + } + // Assume any other resource isn't complete and can be restored. 
+ return false, nil +} + +func getResourceID(groupResource schema.GroupResource, namespace, name string) string { + if namespace == "" { + return fmt.Sprintf("%s/%s", groupResource.String(), name) + } + + return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name) +} + +// getNamespace returns a namespace API object that we should attempt to +// create before restoring anything into it. It will come from the backup +// tarball if it exists, else will be a new one. If from the tarball, it +// will retain its labels, annotations, and spec. +func getNamespace(path, remappedName string) *v1.Namespace { + var nsBytes []byte + var err error + + if nsBytes, err = os.ReadFile(path); err != nil { + return &v1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: remappedName, + }, + } + } + + var backupNS v1.Namespace + if err := json.Unmarshal(nsBytes, &backupNS); err != nil { + klog.Warningf("Error unmarshaling namespace from backup, creating new one.") + return &v1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: remappedName, + }, + } + } + + return &v1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: backupNS.Kind, + APIVersion: backupNS.APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: remappedName, + Labels: backupNS.Labels, + Annotations: backupNS.Annotations, + }, + Spec: backupNS.Spec, + } +} + +// getOrderedResources returns an ordered list of resource identifiers to restore, +// based on the provided resource priorities and backup contents. The returned list +// begins with all of the high prioritized resources (in order), ends with all of +// the low prioritized resources(in order), and an alphabetized list of resources +// in the backup(pick out the prioritized resources) is put in the middle. +func getOrderedResources(resourcePriorities Priorities, backupResources map[string]*archive.ResourceItems) []string { + priorities := map[string]struct{}{} + for _, priority := range resourcePriorities.HighPriorities { + priorities[priority] = struct{}{} + } + for _, priority := range resourcePriorities.LowPriorities { + priorities[priority] = struct{}{} + } + + // pick the prioritized resources out + var orderedBackupResources []string + for resource := range backupResources { + if _, exist := priorities[resource]; exist { + continue + } + orderedBackupResources = append(orderedBackupResources, resource) + } + // alphabetize resources in the backup + sort.Strings(orderedBackupResources) + + list := append(resourcePriorities.HighPriorities, orderedBackupResources...) + return append(list, resourcePriorities.LowPriorities...) 
+} + +// ReversSlice reverse the slice +func reversSlice(s []string) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/service_restore_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/service_restore_action.go new file mode 100644 index 000000000..c00dc6329 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/service_restore_action.go @@ -0,0 +1,68 @@ +package restore + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type ServiceAction struct { +} + +func NewServiceAction() *ServiceAction { + return &ServiceAction{} +} + +func (p *ServiceAction) Resource() []string { + return []string{"services"} +} + +func (p *ServiceAction) Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedService := new(corev1.Service) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, updatedService); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to service") + } + + if updatedService.Spec.ClusterIP != "None" { + updatedService.Spec.ClusterIP = "" + updatedService.Spec.ClusterIPs = nil + } + + annotations := updatedService.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + if _, ok := annotations["kosmos.io/auto-create-mcs"]; !ok { + annotations["kosmos.io/auto-create-mcs"] = "true" + } + updatedService.SetAnnotations(annotations) + + serviceMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedService) + if err != nil { + return nil, errors.Wrap(err, "unable to convert pod to unstructured item") + } + return &unstructured.Unstructured{Object: serviceMap}, nil +} + +func (p *ServiceAction) Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedService := new(corev1.Service) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(fromCluster.Object, updatedService); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to service") + } + + annotations := updatedService.GetAnnotations() + if annotations != nil { + if _, ok := annotations["kosmos.io/auto-create-mcs"]; ok { + delete(annotations, "kosmos.io/auto-create-mcs") + updatedService.SetAnnotations(annotations) + serviceMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedService) + if err != nil { + return nil, errors.Wrap(err, "unable to convert service to unstructured item") + } + return &unstructured.Unstructured{Object: serviceMap}, nil + } + } + + return fromCluster, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/sts_deploy_restore_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/sts_deploy_restore_action.go new file mode 100644 index 000000000..3068c0724 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/sts_deploy_restore_action.go @@ -0,0 +1,128 @@ +package restore + +import ( + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type StsDeployAction struct { +} + +func NewStsDeployAction() *StsDeployAction { + return &StsDeployAction{} +} + +func (p *StsDeployAction) 
Resource() []string {
+    return []string{"statefulsets.apps", "deployments.apps"}
+}
+
+//nolint:gosec // No need to check.
+func (p *StsDeployAction) Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) {
+    _ = &corev1.NodeAffinity{
+        RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+            NodeSelectorTerms: []corev1.NodeSelectorTerm{
+                {
+                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                        {
+                            Key:      "kubernetes.io/hostname",
+                            Operator: corev1.NodeSelectorOpIn,
+                            Values:   []string{restorer.kosmosNodeName},
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    kosmosToleration := corev1.Toleration{
+        Key:      "kosmos.io/node",
+        Operator: corev1.TolerationOpEqual,
+        Value:    "true",
+        Effect:   corev1.TaintEffectNoSchedule,
+    }
+
+    var updatedObj interface{}
+
+    if obj.GetKind() == "Deployment" {
+        updatedDeploy := new(appsv1.Deployment)
+        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, updatedDeploy); err != nil {
+            return nil, errors.Wrap(err, "unable to convert unstructured item to deployment")
+        }
+
+        //affinity := updatedDeploy.Spec.Template.Spec.Affinity
+        //if affinity == nil {
+        //    affinity = &corev1.Affinity{
+        //        NodeAffinity: updatedNodeAffinity,
+        //    }
+        //} else {
+        //    updatedDeploy.Spec.Template.Spec.Affinity.NodeAffinity = updatedNodeAffinity
+        //}
+
+        tolerations := updatedDeploy.Spec.Template.Spec.Tolerations
+        if tolerations == nil {
+            // assign the new slice back to the pod template, otherwise the toleration is lost
+            updatedDeploy.Spec.Template.Spec.Tolerations = []corev1.Toleration{kosmosToleration}
+        } else {
+            kosmosExist := false
+            for _, toleration := range tolerations {
+                if toleration.Key == kosmosToleration.Key {
+                    kosmosExist = true
+                    break
+                }
+            }
+
+            if !kosmosExist {
+                updatedDeploy.Spec.Template.Spec.Tolerations = append(tolerations, kosmosToleration)
+            }
+        }
+        updatedObj = updatedDeploy
+    } else if obj.GetKind() == "StatefulSet" {
+        updatedSts := new(appsv1.StatefulSet)
+        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, updatedSts); err != nil {
+            return nil, errors.Wrap(err, "unable to convert unstructured item to statefulset")
+        }
+
+        //affinity := updatedSts.Spec.Template.Spec.Affinity
+        //if affinity == nil {
+        //    affinity = &corev1.Affinity{
+        //        NodeAffinity: updatedNodeAffinity,
+        //    }
+        //} else {
+        //    updatedSts.Spec.Template.Spec.Affinity.NodeAffinity = updatedNodeAffinity
+        //}
+
+        tolerations := updatedSts.Spec.Template.Spec.Tolerations
+        if tolerations == nil {
+            // assign the new slice back to the pod template, otherwise the toleration is lost
+            updatedSts.Spec.Template.Spec.Tolerations = []corev1.Toleration{kosmosToleration}
+        } else {
+            kosmosExist := false
+            for _, toleration := range tolerations {
+                if toleration.Key == kosmosToleration.Key {
+                    kosmosExist = true
+                    break
+                }
+            }
+
+            if !kosmosExist {
+                updatedSts.Spec.Template.Spec.Tolerations = append(tolerations, kosmosToleration)
+            }
+        }
+        updatedObj = updatedSts
+    } else {
+        return nil, errors.Errorf("unknown obj kind %s", obj.GetKind())
+    }
+
+    stsMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&updatedObj)
+    if err != nil {
+        return nil, errors.Wrap(err, "unable to convert sts/deploy to unstructured item")
+    }
+    return &unstructured.Unstructured{Object: stsMap}, nil
+}
+
+func (p *StsDeployAction) Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) {
+    return fromCluster, nil
+}
diff --git a/pkg/clustertree/cluster-manager/controllers/promote/restore/universal_restore_action.go b/pkg/clustertree/cluster-manager/controllers/promote/restore/universal_restore_action.go
new file mode 100644
index 000000000..54388c2bf --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/restore/universal_restore_action.go @@ -0,0 +1,56 @@ +package restore + +import ( + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type UniversalAction struct { +} + +func NewUniversalAction() *UniversalAction { + return &UniversalAction{} +} + +func (p *UniversalAction) Resource() []string { + return []string{"persistentvolumeclaims", "configmaps", "secrets", "serviceaccounts", "roles.rbac.authorization.k8s.io", "rolebindings.rbac.authorization.k8s.io"} +} + +func (p *UniversalAction) Execute(obj *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedObj := obj.DeepCopy() + objectMeta, err := meta.Accessor(updatedObj) + if err != nil { + return nil, errors.WithStack(err) + } + + annotations := objectMeta.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + + if _, ok := annotations["kosmos-io/cluster-owners"]; !ok { + annotations["kosmos-io/cluster-owners"] = restorer.kosmosClusterName + updatedObj.SetAnnotations(annotations) + } + + return updatedObj, nil +} + +func (p *UniversalAction) Revert(fromCluster *unstructured.Unstructured, restorer *kubernetesRestorer) (*unstructured.Unstructured, error) { + updatedObj := fromCluster.DeepCopy() + objectMeta, err := meta.Accessor(updatedObj) + if err != nil { + return nil, errors.WithStack(err) + } + + annotations := objectMeta.GetAnnotations() + if annotations != nil { + if _, ok := annotations["kosmos-io/cluster-owners"]; ok { + delete(annotations, "kosmos-io/cluster-owners") + updatedObj.SetAnnotations(annotations) + } + } + + return updatedObj, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/types/types.go b/pkg/clustertree/cluster-manager/controllers/promote/types/types.go new file mode 100644 index 000000000..a1d7e3d64 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/types/types.go @@ -0,0 +1,15 @@ +package types + +// HandlerInitializer is a function that initializes and returns a new instance of one of action interfaces +type HandlerInitializer func() (interface{}, error) + +type ItemKey struct { + Resource string + Namespace string + Name string +} + +type RestoredItemStatus struct { + Action string + ItemExists bool +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/extractor.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/extractor.go new file mode 100644 index 000000000..853508a4e --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/extractor.go @@ -0,0 +1,112 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package archive + +import ( + "archive/tar" + "compress/gzip" + "io" + "path/filepath" + + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem" +) + +// Extractor unzips/extracts a backup tarball to a local +// temp directory. +type Extractor struct { + fs filesystem.Interface +} + +func NewExtractor(fs filesystem.Interface) *Extractor { + return &Extractor{ + fs: fs, + } +} + +// UnzipAndExtractBackup extracts a reader on a gzipped tarball to a local temp directory +func (e *Extractor) UnzipAndExtractBackup(src io.Reader) (string, error) { + gzr, err := gzip.NewReader(src) + if err != nil { + klog.Infof("error creating gzip reader: %v", err) + return "", err + } + defer gzr.Close() + + return e.readBackup(tar.NewReader(gzr)) +} + +func (e *Extractor) writeFile(target string, tarRdr *tar.Reader) error { + file, err := e.fs.Create(target) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(file, tarRdr); err != nil { + return err + } + return nil +} + +func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) { + dir, err := e.fs.TempDir("", "") + if err != nil { + klog.Infof("error creating temp dir: %v", err) + return "", err + } + + for { + header, err := tarRdr.Next() + + if err == io.EOF { + break + } + if err != nil { + klog.Infof("error reading tar: %v", err) + return "", err + } + + target := filepath.Join(dir, header.Name) //nolint:gosec // Internal usage. No need to check. + + switch header.Typeflag { + case tar.TypeDir: + err := e.fs.MkdirAll(target, header.FileInfo().Mode()) + if err != nil { + klog.Infof("mkdirall error: %v", err) + return "", err + } + + case tar.TypeReg: + // make sure we have the directory created + err := e.fs.MkdirAll(filepath.Dir(target), header.FileInfo().Mode()) + if err != nil { + klog.Infof("mkdirall error: %v", err) + return "", err + } + + // create the file + if err := e.writeFile(target, tarRdr); err != nil { + klog.Infof("error copying: %v", err) + return "", err + } + } + } + + return dir, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/filesystem.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/filesystem.go new file mode 100644 index 000000000..1eed933f8 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/filesystem.go @@ -0,0 +1,64 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package archive + +import ( + "encoding/json" + "path/filepath" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/constants" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem" +) + +// GetItemFilePath returns an item's file path once extracted from a Velero backup archive. 
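As a sketch of the layout these path helpers produce: the "resources" value for constants.ResourcesDir is an assumption here, while the "namespaces"/"cluster" scope directories match what the parser later in this diff reads.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Assumed values for the constants referenced by GetVersionedItemFilePath.
const (
	resourcesDir       = "resources"
	clusterScopedDir   = "cluster"
	namespaceScopedDir = "namespaces"
)

func main() {
	// Namespace-scoped item, no version directory:
	// /tmp/backup/resources/deployments.apps/namespaces/demo/nginx.json
	p := filepath.Join("/tmp/backup", resourcesDir, "deployments.apps", "", namespaceScopedDir, "demo", "nginx.json")
	fmt.Println(filepath.ToSlash(p))

	// Cluster-scoped item (empty namespace selects the "cluster" directory):
	// /tmp/backup/resources/persistentvolumes/cluster/pv-1.json
	p = filepath.Join("/tmp/backup", resourcesDir, "persistentvolumes", clusterScopedDir, "", "pv-1.json")
	fmt.Println(filepath.ToSlash(p))
}
```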
+func GetItemFilePath(rootDir, groupResource, namespace, name string) string { + return GetVersionedItemFilePath(rootDir, groupResource, namespace, name, "") +} + +// GetVersionedItemFilePath returns an item's file path once extracted from a Velero backup archive, with version included. +func GetVersionedItemFilePath(rootDir, groupResource, namespace, name, versionPath string) string { + path := filepath.Join(rootDir, constants.ResourcesDir, groupResource, versionPath, GetScopeDir(namespace), namespace, name+".json") + return filepath.ToSlash(path) +} + +// GetScopeDir returns NamespaceScopedDir if namespace is present, or ClusterScopedDir if empty +func GetScopeDir(namespace string) string { + if namespace == "" { + return constants.ClusterScopedDir + } + return constants.NamespaceScopedDir +} + +// Unmarshal reads the specified file, unmarshals the JSON contained within it +// and returns an Unstructured object. +func Unmarshal(fs filesystem.Interface, filePath string) (*unstructured.Unstructured, error) { + var obj unstructured.Unstructured + + bytes, err := fs.ReadFile(filePath) + if err != nil { + return nil, err + } + + err = json.Unmarshal(bytes, &obj) + if err != nil { + return nil, err + } + + return &obj, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/parser.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/parser.go new file mode 100644 index 000000000..ee829e0e6 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/archive/parser.go @@ -0,0 +1,260 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package archive + +import ( + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/constants" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem" +) + +var ErrNotExist = errors.New("does not exist") + +// Parser traverses an extracted archive on disk to validate +// it and provide a helpful representation of it to consumers. +type Parser struct { + fs filesystem.Interface +} + +// ResourceItems contains the collection of items of a given resource type +// within a backup, grouped by namespace (or empty string for cluster-scoped +// resources). +type ResourceItems struct { + // GroupResource is API group and resource name, + // formatted as "resource.group". For the "core" + // API group, the ".group" suffix is omitted. + GroupResource string + + // ItemsByNamespace is a map from namespace (or empty string + // for cluster-scoped resources) to a list of individual item + // names contained in the archive. Item names **do not** include + // the file extension. + ItemsByNamespace map[string][]string +} + +// NewParser constructs a Parser. 
+func NewParser(fs filesystem.Interface) *Parser { + return &Parser{ + fs: fs, + } +} + +// Parse reads an extracted backup on the file system and returns +// a structured catalog of the resources and items contained within it. +func (p *Parser) Parse(dir string) (map[string]*ResourceItems, error) { + // ensure top-level "resources" directory exists, and read subdirectories + // of it, where each one is expected to correspond to a resource. + resourcesDir := filepath.Join(dir, constants.ResourcesDir) + resourceDirs, err := p.checkAndReadDir(resourcesDir) + if err != nil { + return nil, err + } + + // loop through each subdirectory (one per resource) and assemble + // catalog of items within it. + resources := map[string]*ResourceItems{} + for _, resourceDir := range resourceDirs { + if !resourceDir.IsDir() { + klog.Warningf("Ignoring unexpected file %q in directory %q", resourceDir.Name(), strings.TrimPrefix(resourcesDir, dir+"/")) + continue + } + + resourceItems := &ResourceItems{ + GroupResource: resourceDir.Name(), + ItemsByNamespace: map[string][]string{}, + } + + // check for existence of a "cluster" subdirectory containing cluster-scoped + // instances of this resource, and read its contents if it exists. + clusterScopedDir := filepath.Join(resourcesDir, resourceDir.Name(), constants.ClusterScopedDir) + exists, err := p.fs.DirExists(clusterScopedDir) + if err != nil { + return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(clusterScopedDir, dir+"/")) + } + if exists { + items, err := p.getResourceItemsForScope(clusterScopedDir, dir) + if err != nil { + return nil, err + } + + if len(items) > 0 { + resourceItems.ItemsByNamespace[""] = items + } + } + + // check for existence of a "namespaces" subdirectory containing further subdirectories, + // one per namespace, and read its contents if it exists. + namespaceScopedDir := filepath.Join(resourcesDir, resourceDir.Name(), constants.NamespaceScopedDir) + exists, err = p.fs.DirExists(namespaceScopedDir) + if err != nil { + return nil, errors.Wrapf(err, "error checking for existence of directory %q", strings.TrimPrefix(namespaceScopedDir, dir+"/")) + } + if exists { + namespaceDirs, err := p.fs.ReadDir(namespaceScopedDir) + if err != nil { + return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(namespaceScopedDir, dir+"/")) + } + + for _, namespaceDir := range namespaceDirs { + if !namespaceDir.IsDir() { + klog.Warningf("Ignoring unexpected file %q in directory %q", namespaceDir.Name(), strings.TrimPrefix(namespaceScopedDir, dir+"/")) + continue + } + + items, err := p.getResourceItemsForScope(filepath.Join(namespaceScopedDir, namespaceDir.Name()), dir) + if err != nil { + return nil, err + } + + if len(items) > 0 { + resourceItems.ItemsByNamespace[namespaceDir.Name()] = items + } + } + } + + resources[resourceDir.Name()] = resourceItems + } + + return resources, nil +} + +// getResourceItemsForScope returns the list of items with a namespace or +// cluster-scoped subdirectory for a specific resource. 
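For orientation, a simplified sketch of what Parse returns for a small extracted archive; the example tree and names are assumed, and the ResourceItems type below is a local stand-in rather than the real one from this package.

```go
package main

import "fmt"

// Simplified stand-in for archive.ResourceItems, used only to illustrate the
// shape of the Parse result.
type ResourceItems struct {
	GroupResource    string
	ItemsByNamespace map[string][]string
}

func main() {
	// Assumed layout on disk after extraction:
	//   resources/deployments.apps/namespaces/demo/nginx.json
	//   resources/persistentvolumes/cluster/pv-1.json
	parsed := map[string]*ResourceItems{
		"deployments.apps":  {GroupResource: "deployments.apps", ItemsByNamespace: map[string][]string{"demo": {"nginx"}}},
		"persistentvolumes": {GroupResource: "persistentvolumes", ItemsByNamespace: map[string][]string{"": {"pv-1"}}},
	}
	for gr, items := range parsed {
		fmt.Println(gr, items.ItemsByNamespace)
	}
}
```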
+func (p *Parser) getResourceItemsForScope(dir, archiveRootDir string) ([]string, error) { + files, err := p.fs.ReadDir(dir) + if err != nil { + return nil, errors.Wrapf(err, "error reading contents of directory %q", strings.TrimPrefix(dir, archiveRootDir+"/")) + } + + var items []string + for _, file := range files { + if file.IsDir() { + klog.Warningf("Ignoring unexpected subdirectory %q in directory %q", file.Name(), strings.TrimPrefix(dir, archiveRootDir+"/")) + continue + } + + items = append(items, strings.TrimSuffix(file.Name(), ".json")) + } + + return items, nil +} + +// checkAndReadDir is a wrapper around fs.DirExists and fs.ReadDir that does checks +// and returns errors if directory cannot be read. +func (p *Parser) checkAndReadDir(dir string) ([]os.FileInfo, error) { + exists, err := p.fs.DirExists(dir) + if err != nil { + return nil, errors.Wrapf(err, "error checking for existence of directory %q", filepath.ToSlash(dir)) + } + if !exists { + return nil, errors.Wrapf(ErrNotExist, "directory %q", filepath.ToSlash(dir)) + } + + contents, err := p.fs.ReadDir(dir) + if err != nil { + return nil, errors.Wrapf(err, "reading contents of %q", filepath.ToSlash(dir)) + } + + return contents, nil +} + +// ParseGroupVersions extracts the versions for each API Group from the backup +// directory names and stores them in a metav1 APIGroup object. +func (p *Parser) ParseGroupVersions(dir string) (map[string]metav1.APIGroup, error) { + resourcesDir := filepath.Join(dir, constants.ResourcesDir) + + // Get the subdirectories inside the "resources" directory. The subdirectories + // will have resource.group names like "horizontalpodautoscalers.autoscaling". + rgDirs, err := p.checkAndReadDir(resourcesDir) + if err != nil { + return nil, err + } + + resourceAGs := make(map[string]metav1.APIGroup) + + // Loop through the resource.group directory names. + for _, rgd := range rgDirs { + group := metav1.APIGroup{ + Name: extractGroupName(rgd.Name()), + } + + rgdPath := filepath.Join(resourcesDir, rgd.Name()) + + // Inside each of the resource.group directories are directories whose + // names are API Group versions like "v1" or "v1-preferredversion" + gvDirs, err := p.checkAndReadDir(rgdPath) + if err != nil { + return nil, err + } + + var supportedVersions []metav1.GroupVersionForDiscovery + + for _, gvd := range gvDirs { + gvdName := gvd.Name() + + // Don't save the namespaces or clusters directories in list of + // supported API Group Versions. + if gvdName == "namespaces" || gvdName == "cluster" { + continue + } + + version := metav1.GroupVersionForDiscovery{ + GroupVersion: strings.TrimPrefix(group.Name+"/"+gvdName, "/"), + Version: gvdName, + } + + if strings.Contains(gvdName, constants.PreferredVersionDir) { + gvdName = strings.TrimSuffix(gvdName, constants.PreferredVersionDir) + + // Update version and group version to be without suffix. + version.Version = gvdName + version.GroupVersion = strings.TrimPrefix(group.Name+"/"+gvdName, "/") + + group.PreferredVersion = version + } + + supportedVersions = append(supportedVersions, version) + } + + group.Versions = supportedVersions + + resourceAGs[rgd.Name()] = group + } + + return resourceAGs, nil +} + +// extractGroupName will take a concatenated resource.group and extract the group, +// if there is one. Resources like "pods" which has no group and will return an +// empty string. 
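A small sketch of how ParseGroupVersions and extractGroupName treat the directory names; the "-preferredversion" suffix value is an assumption based on the "v1-preferredversion" example in the comments above.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// extractGroupName: split a "resource.group" directory name into resource and group.
	parts := strings.SplitN("horizontalpodautoscalers.autoscaling", ".", 2)
	group := ""
	if len(parts) == 2 {
		group = parts[1] // "autoscaling"; a plain "pods" directory would yield ""
	}

	// Assumed value of constants.PreferredVersionDir.
	const preferredVersionSuffix = "-preferredversion"

	for _, dir := range []string{"v1", "v2-preferredversion"} {
		version := dir
		preferred := false
		if strings.Contains(dir, preferredVersionSuffix) {
			version = strings.TrimSuffix(dir, preferredVersionSuffix)
			preferred = true
		}
		fmt.Printf("dir=%s -> groupVersion=%s preferred=%v\n", dir, strings.TrimPrefix(group+"/"+version, "/"), preferred)
	}
	// dir=v1 -> groupVersion=autoscaling/v1 preferred=false
	// dir=v2-preferredversion -> groupVersion=autoscaling/v2 preferred=true
}
```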
+func extractGroupName(resourceGroupDir string) string { + parts := strings.SplitN(resourceGroupDir, ".", 2) + var group string + + if len(parts) == 2 { + group = parts[1] + } + + return group +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/collections/includes_excludes.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/collections/includes_excludes.go new file mode 100644 index 000000000..036e8c74a --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/collections/includes_excludes.go @@ -0,0 +1,529 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections + +import ( + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/discovery" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/kuberesource" +) + +// IncludesExcludes is a type that manages lists of included +// and excluded items. The logic implemented is that everything +// in the included list except those items in the excluded list +// should be included. '*' in the includes list means "include +// everything", but it is not valid in the exclude list. +type IncludesExcludes struct { + includes sets.Set[string] + excludes sets.Set[string] +} + +func NewIncludesExcludes() *IncludesExcludes { + return &IncludesExcludes{ + includes: sets.KeySet(make(map[string]struct{})), + excludes: sets.KeySet(make(map[string]struct{})), + } +} + +// Includes adds items to the includes list. '*' is a wildcard +// value meaning "include everything". +func (ie *IncludesExcludes) Includes(includes ...string) *IncludesExcludes { + ie.includes.Insert(includes...) + return ie +} + +// GetIncludes returns the items in the includes list +func (ie *IncludesExcludes) GetIncludes() []string { + list := []string{} + for key := range ie.includes { + list = append(list, key) + } + return list +} + +// Excludes adds items to the excludes list +func (ie *IncludesExcludes) Excludes(excludes ...string) *IncludesExcludes { + ie.excludes.Insert(excludes...) + return ie +} + +// GetExcludes returns the items in the excludes list +func (ie *IncludesExcludes) GetExcludes() []string { + list := []string{} + for key := range ie.excludes { + list = append(list, key) + } + return list +} + +// ShouldInclude returns whether the specified item should be +// included or not. Everything in the includes list except those +// items in the excludes list should be included. +func (ie *IncludesExcludes) ShouldInclude(s string) bool { + if ie.excludes.Has(s) { + return false + } + + // len=0 means include everything + return ie.includes.Len() == 0 || ie.includes.Has("*") || ie.includes.Has(s) +} + +// IncludesExcludesInterface is used as polymorphic IncludesExcludes for Global and scope +// resources Include/Exclude. 
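To make the filter semantics above concrete, a test-style sketch using the constructors defined in this file; the cases are illustrative only and simply restate the ShouldInclude logic.

```go
package collections

import "testing"

func TestShouldIncludeSketch(t *testing.T) {
	// An empty includes list means "include everything" unless explicitly excluded.
	ie := NewIncludesExcludes().Excludes("secrets")
	if !ie.ShouldInclude("pods") || ie.ShouldInclude("secrets") {
		t.Fatal("unexpected filter result")
	}

	// '*' in includes behaves the same as an empty includes list.
	ie = NewIncludesExcludes().Includes("*").Excludes("secrets")
	if !ie.ShouldInclude("configmaps") || ie.ShouldInclude("secrets") {
		t.Fatal("unexpected filter result")
	}

	// A non-wildcard includes list only admits what it names.
	ie = NewIncludesExcludes().Includes("pods", "services")
	if ie.ShouldInclude("configmaps") || !ie.ShouldInclude("services") {
		t.Fatal("unexpected filter result")
	}
}
```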
+type IncludesExcludesInterface interface { + // Check whether the type name passed in by parameter should be included. + // typeName should be k8s.io/apimachinery/pkg/runtime/schema GroupResource's String() result. + ShouldInclude(typeName string) bool + + // Check whether the type name passed in by parameter should be excluded. + // typeName should be k8s.io/apimachinery/pkg/runtime/schema GroupResource's String() result. + ShouldExclude(typeName string) bool +} + +type GlobalIncludesExcludes struct { + resourceFilter IncludesExcludes + includeClusterResources *bool + namespaceFilter IncludesExcludes + + helper discovery.Helper +} + +// ShouldInclude returns whether the specified item should be +// included or not. Everything in the includes list except those +// items in the excludes list should be included. +// It has some exceptional cases. When IncludeClusterResources is set to false, +// no need to check the filter, all cluster resources are excluded. +func (ie *GlobalIncludesExcludes) ShouldInclude(typeName string) bool { + _, resource, err := ie.helper.ResourceFor(schema.ParseGroupResource(typeName).WithVersion("")) + if err != nil { + klog.Errorf("fail to get resource %s. %s", typeName, err.Error()) + return false + } + + if !resource.Namespaced && IsSetToFalse(ie.includeClusterResources) { + klog.Info("Skipping resource %s, because it's cluster-scoped, and IncludeClusterResources is set to false.", typeName) + return false + } + + // when IncludeClusterResources == nil (auto), only directly + // back up cluster-scoped resources if we're doing a full-cluster + // (all namespaces and all namespace scope types) backup. Note that in the case of a subset of + // namespaces being backed up, some related cluster-scoped resources + // may still be backed up if triggered by a custom action (e.g. PVC->PV). + // If we're processing namespaces themselves, we will not skip here, they may be + // filtered out later. + if typeName != kuberesource.Namespaces.String() && !resource.Namespaced && + ie.includeClusterResources == nil && !ie.namespaceFilter.IncludeEverything() { + klog.Infof("Skipping resource %s, because it's cluster-scoped and only specific namespaces or namespace scope types are included in the backup.", typeName) + return false + } + + return ie.resourceFilter.ShouldInclude(typeName) +} + +// ShouldExclude returns whether the resource type should be excluded or not. +func (ie *GlobalIncludesExcludes) ShouldExclude(typeName string) bool { + // if the type name is specified in excluded list, it's excluded. + if ie.resourceFilter.excludes.Has(typeName) { + return true + } + + _, resource, err := ie.helper.ResourceFor(schema.ParseGroupResource(typeName).WithVersion("")) + if err != nil { + klog.Errorf("fail to get resource %s. %s", typeName, err.Error()) + return true + } + + // the resource type is cluster scope + if !resource.Namespaced { + // if includeClusterResources is set to false, cluster resource should be excluded. + if IsSetToFalse(ie.includeClusterResources) { + return true + } + // if includeClusterResources is set to nil, check whether it's included by resource + // filter. 
+ if ie.includeClusterResources == nil && !ie.resourceFilter.ShouldInclude(typeName) { + return true + } + } + + return false +} + +type ScopeIncludesExcludes struct { + namespaceScopedResourceFilter IncludesExcludes // namespace-scoped resource filter + clusterScopedResourceFilter IncludesExcludes // cluster-scoped resource filter + namespaceFilter IncludesExcludes // namespace filter + + helper discovery.Helper +} + +// ShouldInclude returns whether the specified resource should be included or not. +// The function will check whether the resource is namespace-scoped resource first. +// For namespace-scoped resource, except resources listed in excludes, other things should be included. +// For cluster-scoped resource, except resources listed in excludes, only include the resource specified by the included. +// It also has some exceptional checks. For namespace, as long as it's not excluded, it is involved. +// If all namespace-scoped resources are included, all cluster-scoped resource are returned to get a full backup. +func (ie *ScopeIncludesExcludes) ShouldInclude(typeName string) bool { + _, resource, err := ie.helper.ResourceFor(schema.ParseGroupResource(typeName).WithVersion("")) + if err != nil { + klog.Errorf("fail to get resource %s. %s", typeName, err.Error()) + return false + } + + if resource.Namespaced { + if ie.namespaceScopedResourceFilter.excludes.Has("*") || ie.namespaceScopedResourceFilter.excludes.Has(typeName) { + return false + } + + // len=0 means include everything + return ie.namespaceScopedResourceFilter.includes.Len() == 0 || ie.namespaceScopedResourceFilter.includes.Has("*") || ie.namespaceScopedResourceFilter.includes.Has(typeName) + } + + if ie.clusterScopedResourceFilter.excludes.Has("*") || ie.clusterScopedResourceFilter.excludes.Has(typeName) { + return false + } + + // when IncludedClusterScopedResources and ExcludedClusterScopedResources are not specified, + // only directly back up cluster-scoped resources if we're doing a full-cluster + // (all namespaces and all namespace-scoped types) backup. + if ie.clusterScopedResourceFilter.includes.Len() == 0 && + ie.clusterScopedResourceFilter.excludes.Len() == 0 && + ie.namespaceFilter.IncludeEverything() && + ie.namespaceScopedResourceFilter.IncludeEverything() { + return true + } + + // Also include namespace resource by default. + return ie.clusterScopedResourceFilter.includes.Has("*") || ie.clusterScopedResourceFilter.includes.Has(typeName) || typeName == kuberesource.Namespaces.String() +} + +// ShouldExclude returns whether the resource type should be excluded or not. +// For ScopeIncludesExcludes, if the resource type is specified in the exclude +// list, it should be excluded. +func (ie *ScopeIncludesExcludes) ShouldExclude(typeName string) bool { + _, resource, err := ie.helper.ResourceFor(schema.ParseGroupResource(typeName).WithVersion("")) + if err != nil { + klog.Errorf("fail to get resource %s. %s", typeName, err.Error()) + return true + } + + if resource.Namespaced { + if ie.namespaceScopedResourceFilter.excludes.Has(typeName) { + return true + } + } else { + if ie.clusterScopedResourceFilter.excludes.Has(typeName) { + return true + } + } + return false +} + +// IncludesString returns a string containing all of the includes, separated by commas, or * if the +// list is empty. +func (ie *IncludesExcludes) IncludesString() string { + return asString(ie.includes, "*") +} + +// ExcludesString returns a string containing all of the excludes, separated by commas, or if the +// list is empty. 
+func (ie *IncludesExcludes) ExcludesString() string { + return asString(ie.excludes, "") +} + +func asString(in sets.Set[string], empty string) string { + if in.Len() == 0 { + return empty + } + resultSlice := []string{} + for key := range in { + resultSlice = append(resultSlice, key) + } + return strings.Join(resultSlice, ", ") +} + +// IncludeEverything returns true if the includes list is empty or '*' +// and the excludes list is empty, or false otherwise. +func (ie *IncludesExcludes) IncludeEverything() bool { + return ie.excludes.Len() == 0 && (ie.includes.Len() == 0 || (ie.includes.Len() == 1 && ie.includes.Has("*"))) +} + +func newScopeIncludesExcludes(nsIncludesExcludes IncludesExcludes, helper discovery.Helper) *ScopeIncludesExcludes { + ret := &ScopeIncludesExcludes{ + namespaceScopedResourceFilter: IncludesExcludes{ + includes: sets.KeySet(make(map[string]struct{})), + excludes: sets.KeySet(make(map[string]struct{})), + }, + clusterScopedResourceFilter: IncludesExcludes{ + includes: sets.KeySet(make(map[string]struct{})), + excludes: sets.KeySet(make(map[string]struct{})), + }, + namespaceFilter: nsIncludesExcludes, + helper: helper, + } + + return ret +} + +// ValidateIncludesExcludes checks provided lists of included and excluded +// items to ensure they are a valid set of IncludesExcludes data. +func ValidateIncludesExcludes(includesList, excludesList []string) []error { + // TODO we should not allow an IncludesExcludes object to be created that + // does not meet these criteria. Do a more significant refactoring to embed + // this logic in object creation/modification. + + var errs []error + + includes := sets.NewString(includesList...) + excludes := sets.NewString(excludesList...) + + if includes.Len() > 1 && includes.Has("*") { + errs = append(errs, errors.New("includes list must either contain '*' only, or a non-empty list of items")) + } + + if excludes.Has("*") { + errs = append(errs, errors.New("excludes list cannot contain '*'")) + } + + for _, itm := range excludes.List() { + if includes.Has(itm) { + errs = append(errs, errors.Errorf("excludes list cannot contain an item in the includes list: %v", itm)) + } + } + + return errs +} + +// ValidateNamespaceIncludesExcludes checks provided lists of included and +// excluded namespaces to ensure they are a valid set of IncludesExcludes data. +func ValidateNamespaceIncludesExcludes(includesList, excludesList []string) []error { + errs := ValidateIncludesExcludes(includesList, excludesList) + + includes := sets.NewString(includesList...) + excludes := sets.NewString(excludesList...) + + for _, itm := range includes.List() { + if nsErrs := validateNamespaceName(itm); nsErrs != nil { + errs = append(errs, nsErrs...) + } + } + for _, itm := range excludes.List() { + if nsErrs := validateNamespaceName(itm); nsErrs != nil { + errs = append(errs, nsErrs...) + } + } + + return errs +} + +// ValidateScopedIncludesExcludes checks provided lists of namespace-scoped or cluster-scoped +// included and excluded items to ensure they are a valid set of IncludesExcludes data. +func ValidateScopedIncludesExcludes(includesList, excludesList []string) []error { + var errs []error + + includes := sets.NewString(includesList...) + excludes := sets.NewString(excludesList...) 
+ + if includes.Len() > 1 && includes.Has("*") { + errs = append(errs, errors.New("includes list must either contain '*' only, or a non-empty list of items")) + } + + if excludes.Len() > 1 && excludes.Has("*") { + errs = append(errs, errors.New("excludes list must either contain '*' only, or a non-empty list of items")) + } + + if includes.Len() > 0 && excludes.Has("*") { + errs = append(errs, errors.New("when exclude is '*', include cannot have value")) + } + + for _, itm := range excludes.List() { + if includes.Has(itm) { + errs = append(errs, errors.Errorf("excludes list cannot contain an item in the includes list: %v", itm)) + } + } + + return errs +} + +func validateNamespaceName(ns string) []error { + var errs []error + + // Velero interprets empty string as "no namespace", so allow it even though + // it is not a valid Kubernetes name. + if ns == "" { + return nil + } + + // Kubernetes does not allow asterisks in namespaces but Velero uses them as + // wildcards. Replace asterisks with an arbitrary letter to pass Kubernetes + // validation. + tmpNamespace := strings.ReplaceAll(ns, "*", "x") + + if errMsgs := validation.ValidateNamespaceName(tmpNamespace, false); errMsgs != nil { + for _, msg := range errMsgs { + errs = append(errs, errors.Errorf("invalid namespace %q: %s", ns, msg)) + } + } + + return errs +} + +// generateIncludesExcludes constructs an IncludesExcludes struct by taking the provided +// include/exclude slices, applying the specified mapping function to each item in them, +// and adding the output of the function to the new struct. If the mapping function returns +// an empty string for an item, it is omitted from the result. +func generateIncludesExcludes(includes, excludes []string, mapFunc func(string) string) *IncludesExcludes { + res := NewIncludesExcludes() + + for _, item := range includes { + if item == "*" { + res.Includes(item) + continue + } + + key := mapFunc(item) + if key == "" { + continue + } + res.Includes(key) + } + + for _, item := range excludes { + // wildcards are invalid for excludes, + // so ignore them. + if item == "*" { + continue + } + + key := mapFunc(item) + if key == "" { + continue + } + res.Excludes(key) + } + + return res +} + +// generateScopedIncludesExcludes's function is similar with generateIncludesExcludes, +// but it's used for scoped Includes/Excludes. 
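A test-style sketch of the validation rules implemented above; the cases are illustrative only.

```go
package collections

import "testing"

func TestValidateIncludesExcludesSketch(t *testing.T) {
	// Valid: include everything, exclude one resource type.
	if errs := ValidateIncludesExcludes([]string{"*"}, []string{"secrets"}); len(errs) != 0 {
		t.Fatalf("expected no errors, got %v", errs)
	}

	// Invalid: '*' mixed with concrete items in the includes list.
	if errs := ValidateIncludesExcludes([]string{"*", "pods"}, nil); len(errs) == 0 {
		t.Fatal("expected an error for '*' mixed with other includes")
	}

	// Invalid: the plain validator rejects '*' in excludes...
	if errs := ValidateIncludesExcludes(nil, []string{"*"}); len(errs) == 0 {
		t.Fatal("expected an error for '*' in excludes")
	}

	// ...while the scoped validator allows '*' as the only exclude.
	if errs := ValidateScopedIncludesExcludes(nil, []string{"*"}); len(errs) != 0 {
		t.Fatalf("expected no errors, got %v", errs)
	}
}
```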
+func generateScopedIncludesExcludes(namespacedIncludes, namespacedExcludes, clusterIncludes, clusterExcludes []string, mapFunc func(string, bool) string, nsIncludesExcludes IncludesExcludes, helper discovery.Helper) *ScopeIncludesExcludes { + res := newScopeIncludesExcludes(nsIncludesExcludes, helper) + + generateFilter(res.namespaceScopedResourceFilter.includes, namespacedIncludes, mapFunc, true) + generateFilter(res.namespaceScopedResourceFilter.excludes, namespacedExcludes, mapFunc, true) + generateFilter(res.clusterScopedResourceFilter.includes, clusterIncludes, mapFunc, false) + generateFilter(res.clusterScopedResourceFilter.excludes, clusterExcludes, mapFunc, false) + + return res +} + +func generateFilter(filter sets.Set[string], resources []string, mapFunc func(string, bool) string, namespaced bool) { + for _, item := range resources { + if item == "*" { + filter.Insert(item) + continue + } + + key := mapFunc(item, namespaced) + if key == "" { + continue + } + filter.Insert(key) + } +} + +// GetResourceIncludesExcludes takes the lists of resources to include and exclude, uses the +// discovery helper to resolve them to fully-qualified group-resource names, and returns an +// IncludesExcludes list. +func GetResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *IncludesExcludes { + resources := generateIncludesExcludes( + includes, + excludes, + func(item string) string { + gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) + if err != nil { + // If we can't resolve it, return it as-is. This prevents the generated + // includes-excludes list from including *everything*, if none of the includes + // can be resolved. ref. https://github.com/vmware-tanzu/velero/issues/2461 + return item + } + + gr := gvr.GroupResource() + return gr.String() + }, + ) + + return resources +} + +func GetGlobalResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string, includeClusterResources *bool, nsIncludesExcludes IncludesExcludes) *GlobalIncludesExcludes { + ret := &GlobalIncludesExcludes{ + resourceFilter: *GetResourceIncludesExcludes(helper, includes, excludes), + includeClusterResources: includeClusterResources, + namespaceFilter: nsIncludesExcludes, + helper: helper, + } + + klog.Infof("Including resources: %s", ret.resourceFilter.IncludesString()) + klog.Infof("Excluding resources: %s", ret.resourceFilter.ExcludesString()) + return ret +} + +// GetScopeResourceIncludesExcludes's function is similar with GetResourceIncludesExcludes, +// but it's used for scoped Includes/Excludes, and can handle both cluster-scoped and namespace-scoped resources. 
+func GetScopeResourceIncludesExcludes(helper discovery.Helper, namespaceIncludes, namespaceExcludes, clusterIncludes, clusterExcludes []string, nsIncludesExcludes IncludesExcludes) *ScopeIncludesExcludes { + ret := generateScopedIncludesExcludes( + namespaceIncludes, + namespaceExcludes, + clusterIncludes, + clusterExcludes, + func(item string, namespaced bool) string { + gvr, resource, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) + if err != nil { + return item + } + if resource.Namespaced != namespaced { + return "" + } + + gr := gvr.GroupResource() + return gr.String() + }, + nsIncludesExcludes, + helper, + ) + klog.Infof("Including namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.IncludesString()) + klog.Infof("Excluding namespace-scoped resources: %s", ret.namespaceScopedResourceFilter.ExcludesString()) + klog.Infof("Including cluster-scoped resources: %s", ret.clusterScopedResourceFilter.IncludesString()) + klog.Infof("Excluding cluster-scoped resources: %s", ret.clusterScopedResourceFilter.ExcludesString()) + + return ret +} + +// IsSetToFalse returns true if and only if the bool pointer is non-nil and set to false. +func IsSetToFalse(b *bool) bool { + return b != nil && !*b +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem/file_system.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem/file_system.go new file mode 100644 index 000000000..5b4f332c2 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem/file_system.go @@ -0,0 +1,113 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "io" + "os" + "path/filepath" +) + +// Interface defines methods for interacting with an +// underlying file system. 
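A sketch of how the filesystem abstraction below wires into the extractor and parser added earlier in this diff; "backup.tar.gz" is an assumed local file name, not something this PR defines.

```go
package main

import (
	"fmt"
	"os"

	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/archive"
	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/filesystem"
)

func main() {
	fs := filesystem.NewFileSystem()

	// Open an assumed backup tarball produced by the backup side of this feature.
	f, err := os.Open("backup.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Extract the gzipped tarball to a temp directory via the fs abstraction.
	dir, err := archive.NewExtractor(fs).UnzipAndExtractBackup(f)
	if err != nil {
		panic(err)
	}
	defer fs.RemoveAll(dir)

	// Walk the extracted tree and list the resources it contains.
	resources, err := archive.NewParser(fs).Parse(dir)
	if err != nil {
		panic(err)
	}
	for gr, items := range resources {
		fmt.Println(gr, len(items.ItemsByNamespace))
	}
}
```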
+type Interface interface { + TempDir(dir, prefix string) (string, error) + MkdirAll(path string, perm os.FileMode) error + Create(name string) (io.WriteCloser, error) + OpenFile(name string, flag int, perm os.FileMode) (io.WriteCloser, error) + RemoveAll(path string) error + ReadDir(dirname string) ([]os.FileInfo, error) + ReadFile(filename string) ([]byte, error) + DirExists(path string) (bool, error) + TempFile(dir, prefix string) (NameWriteCloser, error) + Stat(path string) (os.FileInfo, error) + Glob(path string) ([]string, error) +} + +type NameWriteCloser interface { + io.WriteCloser + + Name() string +} + +func NewFileSystem() Interface { + return &osFileSystem{} +} + +type osFileSystem struct{} + +func (fs *osFileSystem) Glob(path string) ([]string, error) { + return filepath.Glob(path) +} + +func (fs *osFileSystem) TempDir(dir, prefix string) (string, error) { + return os.MkdirTemp(dir, prefix) +} + +func (fs *osFileSystem) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (fs *osFileSystem) Create(name string) (io.WriteCloser, error) { + return os.Create(name) +} + +func (fs *osFileSystem) OpenFile(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + return os.OpenFile(name, flag, perm) +} + +func (fs *osFileSystem) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (fs *osFileSystem) ReadDir(dirname string) ([]os.FileInfo, error) { + var fileInfos []os.FileInfo + dirInfos, ise := os.ReadDir(dirname) + if ise != nil { + return fileInfos, ise + } + for _, dirInfo := range dirInfos { + fileInfo, ise := dirInfo.Info() + if ise == nil { + fileInfos = append(fileInfos, fileInfo) + } + } + return fileInfos, nil +} + +func (fs *osFileSystem) ReadFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func (fs *osFileSystem) DirExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (fs *osFileSystem) TempFile(dir, prefix string) (NameWriteCloser, error) { + return os.CreateTemp(dir, prefix) +} + +func (fs *osFileSystem) Stat(path string) (os.FileInfo, error) { + return os.Stat(path) +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/kube/kube_utils.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/kube/kube_utils.go new file mode 100644 index 000000000..e9bb3bc0a --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/kube/kube_utils.go @@ -0,0 +1,216 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package kube
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/pkg/errors"
+	corev1api "k8s.io/api/core/v1"
+	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/wait"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// NamespaceAndName returns a string in the format <namespace>/<name>
+func NamespaceAndName(objMeta metav1.Object) string {
+	if objMeta.GetNamespace() == "" {
+		return objMeta.GetName()
+	}
+	return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
+}
+
+// EnsureNamespaceExistsAndIsReady attempts to create the provided Kubernetes namespace.
+// It returns three values: a bool indicating whether or not the namespace is ready,
+// a bool indicating whether or not the namespace was created and an error if the creation failed
+// for a reason other than that the namespace already exists. Note that in the case where the
+// namespace already exists and is not ready, this function will return (false, false, nil).
+// If the namespace exists and is marked for deletion, this function will wait up to the timeout for it to fully delete.
+func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client corev1client.NamespaceInterface, timeout time.Duration) (bool, bool, error) {
+	// nsCreated tells whether the namespace was created by this method
+	// required for keeping track of number of restored items
+	var nsCreated bool
+	var ready bool
+	err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		clusterNS, err := client.Get(context.TODO(), namespace.Name, metav1.GetOptions{})
+
+		if apierrors.IsNotFound(err) {
+			// Namespace isn't in cluster, we're good to create.
+			return true, nil
+		}
+
+		if err != nil {
+			// Return the err and exit the loop.
+			return true, err
+		}
+
+		if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
+			// Marked for deletion, keep waiting
+			return false, nil
+		}
+
+		// clusterNS found, is not nil, and not marked for deletion, therefore we shouldn't create it.
+		ready = true
+		return true, nil
+	})
+
+	// err will be set if we timed out or encountered issues retrieving the namespace.
+	if err != nil {
+		return false, nsCreated, errors.Wrapf(err, "error getting namespace %s", namespace.Name)
+	}
+
+	// In the case the namespace already exists and isn't marked for deletion, assume it's ready for use.
+ if ready { + return true, nsCreated, nil + } + + clusterNS, err := client.Create(context.TODO(), namespace, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) { + // Somehow created after all our polling and marked for deletion, return an error + return false, nsCreated, errors.Errorf("namespace %s created and marked for termination after timeout", namespace.Name) + } + } else if err != nil { + return false, nsCreated, errors.Wrapf(err, "error creating namespace %s", namespace.Name) + } else { + nsCreated = true + } + + // The namespace created successfully + return true, nsCreated, nil +} + +func resetStatus(obj *unstructured.Unstructured) { + unstructured.RemoveNestedField(obj.UnstructuredContent(), "status") +} + +func ResetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + _, err := resetMetadata(obj) + if err != nil { + return nil, err + } + resetStatus(obj) + return obj, nil +} + +func resetMetadata(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + res, ok := obj.Object["metadata"] + if !ok { + return nil, errors.New("metadata not found") + } + metadata, ok := res.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("metadata was of type %T, expected map[string]interface{}", res) + } + + for k := range metadata { + switch k { + case "generateName", "selfLink", "uid", "resourceVersion", "generation", "creationTimestamp", "deletionTimestamp", + "deletionGracePeriodSeconds", "ownerReferences": + delete(metadata, k) + } + } + + return obj, nil +} + +// IsV1CRDReady checks a v1 CRD to see if it's ready, with both the Established and NamesAccepted conditions. +func IsV1CRDReady(crd *apiextv1.CustomResourceDefinition) bool { + var isEstablished, namesAccepted bool + for _, cond := range crd.Status.Conditions { + if cond.Type == apiextv1.Established && cond.Status == apiextv1.ConditionTrue { + isEstablished = true + } + if cond.Type == apiextv1.NamesAccepted && cond.Status == apiextv1.ConditionTrue { + namesAccepted = true + } + } + + return (isEstablished && namesAccepted) +} + +// IsV1Beta1CRDReady checks a v1beta1 CRD to see if it's ready, with both the Established and NamesAccepted conditions. 
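Both IsV1CRDReady above and the v1beta1 variant introduced by the preceding comment are usually reached through IsCRDReady (defined further below) inside a poll loop; a minimal sketch of that pattern, where every name outside this patch is illustrative:

// Sketch only (not part of the patch): wait for a promoted CRD on the root
// cluster to report Established and NamesAccepted before its custom resources
// are promoted. waitForCRDReady, the CRD name and the timeout are illustrative;
// GVR_CRD is the constant added to pkg/utils/constants.go by this patch.
package main

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/promote/utils/kube"
	"github.com/kosmos.io/kosmos/pkg/utils"
)

func waitForCRDReady(dyn dynamic.Interface, crdName string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		obj, err := dyn.Resource(utils.GVR_CRD).Get(context.TODO(), crdName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			// Not created on the root cluster yet, keep waiting.
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return kube.IsCRDReady(obj)
	})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		klog.Fatal(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		klog.Fatal(err)
	}
	if err := waitForCRDReady(dyn, "promotepolicies.kosmos.io", 30*time.Second); err != nil {
		klog.Fatal(err)
	}
	klog.Info("CRD is established and names are accepted")
}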
+func IsV1Beta1CRDReady(crd *apiextv1beta1.CustomResourceDefinition) bool { + var isEstablished, namesAccepted bool + for _, cond := range crd.Status.Conditions { + if cond.Type == apiextv1beta1.Established && cond.Status == apiextv1beta1.ConditionTrue { + isEstablished = true + } + if cond.Type == apiextv1beta1.NamesAccepted && cond.Status == apiextv1beta1.ConditionTrue { + namesAccepted = true + } + } + + return (isEstablished && namesAccepted) +} + +// IsCRDReady triggers IsV1Beta1CRDReady/IsV1CRDReady according to the version of the input param +func IsCRDReady(crd *unstructured.Unstructured) (bool, error) { + ver := crd.GroupVersionKind().Version + switch ver { + case "v1beta1": + v1beta1crd := &apiextv1beta1.CustomResourceDefinition{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(crd.Object, v1beta1crd) + if err != nil { + return false, err + } + return IsV1Beta1CRDReady(v1beta1crd), nil + case "v1": + v1crd := &apiextv1.CustomResourceDefinition{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(crd.Object, v1crd) + if err != nil { + return false, err + } + return IsV1CRDReady(v1crd), nil + default: + return false, fmt.Errorf("unable to handle CRD with version %s", ver) + } +} + +func GeneratePatch(fromCluster, desired *unstructured.Unstructured) ([]byte, error) { + // If the objects are already equal, there's no need to generate a patch. + if equality.Semantic.DeepEqual(fromCluster, desired) { + return nil, nil + } + + desiredBytes, err := json.Marshal(desired.Object) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal desired object") + } + + fromClusterBytes, err := json.Marshal(fromCluster.Object) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal in-cluster object") + } + + patchBytes, err := jsonpatch.CreateMergePatch(fromClusterBytes, desiredBytes) + if err != nil { + return nil, errors.Wrap(err, "unable to create merge patch") + } + + return patchBytes, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/shortcut_expander.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/shortcut_expander.go new file mode 100644 index 000000000..2db9cd63e --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/shortcut_expander.go @@ -0,0 +1,142 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Modifications Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" +) + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. +type ResourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the +// resource first, then invokes the wrapped RESTMapper. 
+// +// This shortcutExpander differs from the upstream one in that it takes a []*metav1.APIResourceList +// in its constructor, rather than a discovery interface. This allows the discovery information to +// be cached externally so it doesn't have to be re-queried every time the shortcutExpander is invoked. +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + resources []*metav1.APIResourceList +} + +var _ meta.RESTMapper = &shortcutExpander{} + +func NewShortcutExpander(delegate meta.RESTMapper, resources []*metav1.APIResourceList) (shortcutExpander, error) { + return shortcutExpander{RESTMapper: delegate, resources: resources}, nil +} + +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) +} + +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]ResourceShortcuts, error) { + res := []ResourceShortcuts{} + for _, apiResources := range e.resources { + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.Errorf("Unable to parse groupversion %s. %v", apiResources.GroupVersion, err) + continue + } + rs := ResourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. 
+ if resources, err := e.getShortcutMappings(); err == nil { + for _, item := range resources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range resources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + return resource + } + } + } + + return resource +} diff --git a/pkg/clustertree/cluster-manager/controllers/promote/utils/utils.go b/pkg/clustertree/cluster-manager/controllers/promote/utils/utils.go new file mode 100644 index 000000000..f2cce784f --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/promote/utils/utils.go @@ -0,0 +1,29 @@ +package utils + +import ( + "fmt" + "reflect" +) + +// ToMapSetE converts a slice or array to map[interface{}]interface{} with error +// interface{} is slice's item +func ToMapSetE(i interface{}) (interface{}, error) { + // judge the validation of the input + if i == nil { + return nil, fmt.Errorf("unable to convert %#v of type %T to map[interface{}]interface{}", i, i) + } + kind := reflect.TypeOf(i).Kind() + if kind != reflect.Slice && kind != reflect.Array { + return nil, fmt.Errorf("the input %#v of type %T isn't a slice or array", i, i) + } + + // execute the convert + v := reflect.ValueOf(i) + m := make(map[interface{}]interface{}) + for j := 0; j < v.Len(); j++ { + value := v.Index(j).Interface() + key := fmt.Sprintf("%v", v.Index(j).Interface()) + m[key] = value + } + return m, nil +} diff --git a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go index 781135568..709f0683a 100644 --- a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go +++ b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go @@ -6,6 +6,7 @@ import ( "sync" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,6 +39,7 @@ type ClusterNode struct { type LeafResource struct { Client client.Client DynamicClient dynamic.Interface + DiscoveryClient *discovery.DiscoveryClient Clientset kubernetes.Interface KosmosClient kosmosversioned.Interface ClusterName string diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go index 2fe015053..db9c25526 100644 --- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go +++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_kosmos_client.go @@ -48,6 +48,10 @@ func (c *FakeKosmosV1alpha1) PodConvertPolicies(namespace string) v1alpha1.PodCo return &FakePodConvertPolicies{c, namespace} } +func (c *FakeKosmosV1alpha1) PromotePolicies(namespace string) v1alpha1.PromotePolicyInterface { + return &FakePromotePolicies{c, namespace} +} + func (c *FakeKosmosV1alpha1) ShadowDaemonSets(namespace string) v1alpha1.ShadowDaemonSetInterface { return &FakeShadowDaemonSets{c, namespace} } diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_promotepolicy.go 
b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_promotepolicy.go new file mode 100644 index 000000000..5949310ef --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/fake/fake_promotepolicy.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePromotePolicies implements PromotePolicyInterface +type FakePromotePolicies struct { + Fake *FakeKosmosV1alpha1 + ns string +} + +var promotepoliciesResource = schema.GroupVersionResource{Group: "kosmos.io", Version: "v1alpha1", Resource: "promotepolicies"} + +var promotepoliciesKind = schema.GroupVersionKind{Group: "kosmos.io", Version: "v1alpha1", Kind: "PromotePolicy"} + +// Get takes name of the promotePolicy, and returns the corresponding promotePolicy object, and an error if there is any. +func (c *FakePromotePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PromotePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(promotepoliciesResource, c.ns, name), &v1alpha1.PromotePolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PromotePolicy), err +} + +// List takes label and field selectors, and returns the list of PromotePolicies that match those selectors. +func (c *FakePromotePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PromotePolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(promotepoliciesResource, promotepoliciesKind, c.ns, opts), &v1alpha1.PromotePolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PromotePolicyList{ListMeta: obj.(*v1alpha1.PromotePolicyList).ListMeta} + for _, item := range obj.(*v1alpha1.PromotePolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested promotePolicies. +func (c *FakePromotePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(promotepoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a promotePolicy and creates it. Returns the server's representation of the promotePolicy, and an error, if there is any. +func (c *FakePromotePolicies) Create(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.CreateOptions) (result *v1alpha1.PromotePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(promotepoliciesResource, c.ns, promotePolicy), &v1alpha1.PromotePolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PromotePolicy), err +} + +// Update takes the representation of a promotePolicy and updates it. Returns the server's representation of the promotePolicy, and an error, if there is any. +func (c *FakePromotePolicies) Update(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (result *v1alpha1.PromotePolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(promotepoliciesResource, c.ns, promotePolicy), &v1alpha1.PromotePolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PromotePolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePromotePolicies) UpdateStatus(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (*v1alpha1.PromotePolicy, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(promotepoliciesResource, "status", c.ns, promotePolicy), &v1alpha1.PromotePolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PromotePolicy), err +} + +// Delete takes name of the promotePolicy and deletes it. Returns an error if one occurs. +func (c *FakePromotePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(promotepoliciesResource, c.ns, name, opts), &v1alpha1.PromotePolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePromotePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(promotepoliciesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.PromotePolicyList{}) + return err +} + +// Patch applies the patch and returns the patched promotePolicy. +func (c *FakePromotePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PromotePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(promotepoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.PromotePolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PromotePolicy), err +} diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go index bdba06cb2..4c8e2e4e9 100644 --- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/generated_expansion.go @@ -20,6 +20,8 @@ type NodeConfigExpansion interface{} type PodConvertPolicyExpansion interface{} +type PromotePolicyExpansion interface{} + type ShadowDaemonSetExpansion interface{} type VirtualClusterExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go index 00a02b3da..cd9ab480d 100644 --- a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go +++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/kosmos_client.go @@ -21,6 +21,7 @@ type KosmosV1alpha1Interface interface { KnodesGetter NodeConfigsGetter PodConvertPoliciesGetter + PromotePoliciesGetter ShadowDaemonSetsGetter VirtualClustersGetter } @@ -66,6 +67,10 @@ func (c *KosmosV1alpha1Client) PodConvertPolicies(namespace string) PodConvertPo return newPodConvertPolicies(c, namespace) } +func (c *KosmosV1alpha1Client) PromotePolicies(namespace string) PromotePolicyInterface { + return newPromotePolicies(c, namespace) +} + func (c *KosmosV1alpha1Client) ShadowDaemonSets(namespace string) ShadowDaemonSetInterface { return newShadowDaemonSets(c, namespace) } diff --git 
a/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/promotepolicy.go b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/promotepolicy.go new file mode 100644 index 000000000..69cc4829b --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/kosmos/v1alpha1/promotepolicy.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + scheme "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PromotePoliciesGetter has a method to return a PromotePolicyInterface. +// A group's client should implement this interface. +type PromotePoliciesGetter interface { + PromotePolicies(namespace string) PromotePolicyInterface +} + +// PromotePolicyInterface has methods to work with PromotePolicy resources. +type PromotePolicyInterface interface { + Create(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.CreateOptions) (*v1alpha1.PromotePolicy, error) + Update(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (*v1alpha1.PromotePolicy, error) + UpdateStatus(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (*v1alpha1.PromotePolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PromotePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PromotePolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PromotePolicy, err error) + PromotePolicyExpansion +} + +// promotePolicies implements PromotePolicyInterface +type promotePolicies struct { + client rest.Interface + ns string +} + +// newPromotePolicies returns a PromotePolicies +func newPromotePolicies(c *KosmosV1alpha1Client, namespace string) *promotePolicies { + return &promotePolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the promotePolicy, and returns the corresponding promotePolicy object, and an error if there is any. +func (c *promotePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PromotePolicy, err error) { + result = &v1alpha1.PromotePolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("promotepolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PromotePolicies that match those selectors. +func (c *promotePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PromotePolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PromotePolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("promotepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested promotePolicies. 
+func (c *promotePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("promotepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a promotePolicy and creates it. Returns the server's representation of the promotePolicy, and an error, if there is any. +func (c *promotePolicies) Create(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.CreateOptions) (result *v1alpha1.PromotePolicy, err error) { + result = &v1alpha1.PromotePolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("promotepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(promotePolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a promotePolicy and updates it. Returns the server's representation of the promotePolicy, and an error, if there is any. +func (c *promotePolicies) Update(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (result *v1alpha1.PromotePolicy, err error) { + result = &v1alpha1.PromotePolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("promotepolicies"). + Name(promotePolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(promotePolicy). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *promotePolicies) UpdateStatus(ctx context.Context, promotePolicy *v1alpha1.PromotePolicy, opts v1.UpdateOptions) (result *v1alpha1.PromotePolicy, err error) { + result = &v1alpha1.PromotePolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("promotepolicies"). + Name(promotePolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(promotePolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the promotePolicy and deletes it. Returns an error if one occurs. +func (c *promotePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("promotepolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *promotePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("promotepolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched promotePolicy. +func (c *promotePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PromotePolicy, err error) { + result = &v1alpha1.PromotePolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("promotepolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 297eb2dd8..90fa99c84 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -56,6 +56,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().NodeConfigs().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("podconvertpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().PodConvertPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("promotepolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().PromotePolicies().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("shadowdaemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kosmos().V1alpha1().ShadowDaemonSets().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("virtualclusters"): diff --git a/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go b/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go index 4f9349862..c31c4786f 100644 --- a/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go +++ b/pkg/generated/informers/externalversions/kosmos/v1alpha1/interface.go @@ -26,6 +26,8 @@ type Interface interface { NodeConfigs() NodeConfigInformer // PodConvertPolicies returns a PodConvertPolicyInformer. PodConvertPolicies() PodConvertPolicyInformer + // PromotePolicies returns a PromotePolicyInformer. + PromotePolicies() PromotePolicyInformer // ShadowDaemonSets returns a ShadowDaemonSetInformer. ShadowDaemonSets() ShadowDaemonSetInformer // VirtualClusters returns a VirtualClusterInformer. @@ -88,6 +90,11 @@ func (v *version) PodConvertPolicies() PodConvertPolicyInformer { return &podConvertPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// PromotePolicies returns a PromotePolicyInformer. +func (v *version) PromotePolicies() PromotePolicyInformer { + return &promotePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ShadowDaemonSets returns a ShadowDaemonSetInformer. func (v *version) ShadowDaemonSets() ShadowDaemonSetInformer { return &shadowDaemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/informers/externalversions/kosmos/v1alpha1/promotepolicy.go b/pkg/generated/informers/externalversions/kosmos/v1alpha1/promotepolicy.go new file mode 100644 index 000000000..c174f4516 --- /dev/null +++ b/pkg/generated/informers/externalversions/kosmos/v1alpha1/promotepolicy.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
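The generated informer that follows is normally consumed through the shared informer factory rather than constructed directly; a hedged wiring sketch, in which the kubeconfig loading, resync period and handlers are illustrative:

// Sketch only (not part of the patch): watch PromotePolicy objects through the
// generated shared informer factory.
package main

import (
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	versioned "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
	kosmosinformers "github.com/kosmos.io/kosmos/pkg/generated/informers/externalversions"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		klog.Fatal(err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		klog.Fatal(err)
	}

	factory := kosmosinformers.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Kosmos().V1alpha1().PromotePolicies().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { klog.Infof("promote policy added: %T", obj) },
		DeleteFunc: func(obj interface{}) { klog.Infof("promote policy deleted: %T", obj) },
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	<-stopCh
}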
+ +package v1alpha1 + +import ( + "context" + time "time" + + kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + versioned "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned" + internalinterfaces "github.com/kosmos.io/kosmos/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PromotePolicyInformer provides access to a shared informer and lister for +// PromotePolicies. +type PromotePolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PromotePolicyLister +} + +type promotePolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPromotePolicyInformer constructs a new informer for PromotePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPromotePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPromotePolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPromotePolicyInformer constructs a new informer for PromotePolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPromotePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KosmosV1alpha1().PromotePolicies(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KosmosV1alpha1().PromotePolicies(namespace).Watch(context.TODO(), options) + }, + }, + &kosmosv1alpha1.PromotePolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *promotePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPromotePolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *promotePolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kosmosv1alpha1.PromotePolicy{}, f.defaultInformer) +} + +func (f *promotePolicyInformer) Lister() v1alpha1.PromotePolicyLister { + return v1alpha1.NewPromotePolicyLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go b/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go index 6641d329f..33ae22cd0 100644 --- a/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go +++ b/pkg/generated/listers/kosmos/v1alpha1/expansion_generated.go @@ -50,6 +50,14 @@ type PodConvertPolicyListerExpansion interface{} // 
PodConvertPolicyNamespaceLister. type PodConvertPolicyNamespaceListerExpansion interface{} +// PromotePolicyListerExpansion allows custom methods to be added to +// PromotePolicyLister. +type PromotePolicyListerExpansion interface{} + +// PromotePolicyNamespaceListerExpansion allows custom methods to be added to +// PromotePolicyNamespaceLister. +type PromotePolicyNamespaceListerExpansion interface{} + // ShadowDaemonSetListerExpansion allows custom methods to be added to // ShadowDaemonSetLister. type ShadowDaemonSetListerExpansion interface{} diff --git a/pkg/generated/listers/kosmos/v1alpha1/promotepolicy.go b/pkg/generated/listers/kosmos/v1alpha1/promotepolicy.go new file mode 100644 index 000000000..43598e615 --- /dev/null +++ b/pkg/generated/listers/kosmos/v1alpha1/promotepolicy.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PromotePolicyLister helps list PromotePolicies. +// All objects returned here must be treated as read-only. +type PromotePolicyLister interface { + // List lists all PromotePolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.PromotePolicy, err error) + // PromotePolicies returns an object that can list and get PromotePolicies. + PromotePolicies(namespace string) PromotePolicyNamespaceLister + PromotePolicyListerExpansion +} + +// promotePolicyLister implements the PromotePolicyLister interface. +type promotePolicyLister struct { + indexer cache.Indexer +} + +// NewPromotePolicyLister returns a new PromotePolicyLister. +func NewPromotePolicyLister(indexer cache.Indexer) PromotePolicyLister { + return &promotePolicyLister{indexer: indexer} +} + +// List lists all PromotePolicies in the indexer. +func (s *promotePolicyLister) List(selector labels.Selector) (ret []*v1alpha1.PromotePolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PromotePolicy)) + }) + return ret, err +} + +// PromotePolicies returns an object that can list and get PromotePolicies. +func (s *promotePolicyLister) PromotePolicies(namespace string) PromotePolicyNamespaceLister { + return promotePolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PromotePolicyNamespaceLister helps list and get PromotePolicies. +// All objects returned here must be treated as read-only. +type PromotePolicyNamespaceLister interface { + // List lists all PromotePolicies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.PromotePolicy, err error) + // Get retrieves the PromotePolicy from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.PromotePolicy, error) + PromotePolicyNamespaceListerExpansion +} + +// promotePolicyNamespaceLister implements the PromotePolicyNamespaceLister +// interface. +type promotePolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PromotePolicies in the indexer for a given namespace. 
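The namespace-scoped List and Get methods that follow read only from the informer cache; a reconciler would typically query them through the lister rather than the API server. A small illustrative helper (the package and function names are assumptions):

// Sketch only (not part of the patch): read PromotePolicies from the informer
// cache via the generated lister.
package promotehelpers

import (
	"k8s.io/apimachinery/pkg/labels"

	kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
	listers "github.com/kosmos.io/kosmos/pkg/generated/listers/kosmos/v1alpha1"
)

// policiesForNamespace returns every cached PromotePolicy in the given namespace.
func policiesForNamespace(lister listers.PromotePolicyLister, namespace string) ([]*kosmosv1alpha1.PromotePolicy, error) {
	return lister.PromotePolicies(namespace).List(labels.Everything())
}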
+func (s promotePolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PromotePolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PromotePolicy)) + }) + return ret, err +} + +// Get retrieves the PromotePolicy from the indexer for a given namespace and name. +func (s promotePolicyNamespaceLister) Get(name string) (*v1alpha1.PromotePolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("promotepolicy"), name) + } + return obj.(*v1alpha1.PromotePolicy), nil +} diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 1c7db7e07..be9105244 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -65,6 +65,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PodConvertPolicySpec": schema_pkg_apis_kosmos_v1alpha1_PodConvertPolicySpec(ref), "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PolicyTerm": schema_pkg_apis_kosmos_v1alpha1_PolicyTerm(ref), "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromoteResources": schema_pkg_apis_kosmos_v1alpha1_PromoteResources(ref), + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy": schema_pkg_apis_kosmos_v1alpha1_PromotePolicy(ref), + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyList": schema_pkg_apis_kosmos_v1alpha1_PromotePolicyList(ref), + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyProgress": schema_pkg_apis_kosmos_v1alpha1_PromotePolicyProgress(ref), + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicySpec": schema_pkg_apis_kosmos_v1alpha1_PromotePolicySpec(ref), + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyStatus": schema_pkg_apis_kosmos_v1alpha1_PromotePolicyStatus(ref), "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Proxy": schema_pkg_apis_kosmos_v1alpha1_Proxy(ref), "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.ResourceSelector": schema_pkg_apis_kosmos_v1alpha1_ResourceSelector(ref), "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.Route": schema_pkg_apis_kosmos_v1alpha1_Route(ref), @@ -2357,6 +2362,284 @@ func schema_pkg_apis_kosmos_v1alpha1_PromoteResources(ref common.ReferenceCallba } } +func schema_pkg_apis_kosmos_v1alpha1_PromotePolicy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PromotePolicy is custom resource that represents the capture of sync leaf cluster", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicySpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicySpec", "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_kosmos_v1alpha1_PromotePolicyList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BackupList is a list of promotePolicys.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicy", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_kosmos_v1alpha1_PromotePolicyProgress(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BackupProgress stores information about the progress of a Backup's execution.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "totalItems": { + SchemaProps: spec.SchemaProps{ + Description: "TotalItems is the total number of items to be backed up. 
This number may change throughout the execution of the backup due to plugins that return additional related items to back up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "itemsBackedUp": { + SchemaProps: spec.SchemaProps{ + Description: "ItemsBackedUp is the number of items that have actually been written to the backup tarball so far.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_kosmos_v1alpha1_PromotePolicySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PromotePolicySpec defines the desired state of promotePolicy", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clusterName": { + SchemaProps: spec.SchemaProps{ + Description: "Cluster is a cluster that needs to be migrated", + Type: []string{"string"}, + Format: "", + }, + }, + "includedNamespaces": { + SchemaProps: spec.SchemaProps{ + Description: "IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "excludedNamespaces": { + SchemaProps: spec.SchemaProps{ + Description: "ExcludedNamespaces contains a list of namespaces that are not included in the backup.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "includedNamespaceScopedResources": { + SchemaProps: spec.SchemaProps{ + Description: "IncludedNamespaceScopedResources is a slice of namespace-scoped resource type names to include in the backup. The default value is \"*\".", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "excludedNamespaceScopedResources": { + SchemaProps: spec.SchemaProps{ + Description: "ExcludedNamespaceScopedResources is a slice of namespace-scoped resource type names to exclude from the backup. If set to \"*\", all namespace-scoped resource types are excluded. 
The default value is empty.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "rollback": { + SchemaProps: spec.SchemaProps{ + Description: "Rollback set true, then rollback from the backup", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_kosmos_v1alpha1_PromotePolicyStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PromotePolicyStatus defines the observed state of promotePolicy", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase is the current state of the Backup.", + Type: []string{"string"}, + Format: "", + }, + }, + "precheckErrors": { + SchemaProps: spec.SchemaProps{ + Description: "PrecheckErrors is a slice of all precheck errors (if applicable).", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "startTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "StartTimestamp records the time a sync was started. The server's time is used for StartTimestamps", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "completionTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "CompletionTimestamp records the time a sync was completed. Completion time is recorded even on failed sync. The server's time is used for CompletionTimestamps", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "failureReason": { + SchemaProps: spec.SchemaProps{ + Description: "FailureReason is an error that caused the entire sync to fail.", + Type: []string{"string"}, + Format: "", + }, + }, + "progress": { + SchemaProps: spec.SchemaProps{ + Description: "Progress contains information about the sync's execution progress. 
Note that this information is best-effort only -- if fails to update it for any reason, it may be inaccurate/stale.", + Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyProgress"), + }, + }, + "backedupFile": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.PromotePolicyProgress", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + func schema_pkg_apis_kosmos_v1alpha1_Proxy(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/kosmosctl/manifest/manifest_deployments.go b/pkg/kosmosctl/manifest/manifest_deployments.go index 3df1c76d9..b052194cb 100644 --- a/pkg/kosmosctl/manifest/manifest_deployments.go +++ b/pkg/kosmosctl/manifest/manifest_deployments.go @@ -136,6 +136,7 @@ spec: command: - clustertree-cluster-manager - --multi-cluster-service=true + - --forbid-promote-namespace=kube-node-lease,kube-public,kube-system - --v=4 volumes: - name: credentials diff --git a/pkg/utils/constants.go b/pkg/utils/constants.go index 26a3bc5ca..1a11918b9 100644 --- a/pkg/utils/constants.go +++ b/pkg/utils/constants.go @@ -187,3 +187,9 @@ var GVR_SERVICE = schema.GroupVersionResource{ Version: "v1", Resource: "services", } + +var GVR_CRD = schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", +} diff --git a/pkg/utils/podutils/pod.go b/pkg/utils/podutils/pod.go index 34636caae..1828adeab 100644 --- a/pkg/utils/podutils/pod.go +++ b/pkg/utils/podutils/pod.go @@ -326,6 +326,7 @@ func GetUpdatedPod(orig, update *corev1.Pod, ignoreLabels []string, leafMode clu orig.Labels = make(map[string]string) } orig.Labels[utils.KosmosPodLabel] = "true" + //delete(update.Annotations, "kubectl.kubernetes.io/last-applied-configuration") orig.Annotations = update.Annotations orig.Spec.ActiveDeadlineSeconds = update.Spec.ActiveDeadlineSeconds if orig.Labels != nil { diff --git a/vendor/modules.txt b/vendor/modules.txt index 6382b8dfd..67d68393c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1787,15 +1787,6 @@ k8s.io/kubernetes/pkg/volume/util/recyclerclient k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler -# k8s.io/metrics v0.26.3 => k8s.io/metrics v0.26.3 -## explicit; go 1.19 -k8s.io/metrics/pkg/apis/metrics -k8s.io/metrics/pkg/apis/metrics/v1alpha1 -k8s.io/metrics/pkg/apis/metrics/v1beta1 -k8s.io/metrics/pkg/client/clientset/versioned -k8s.io/metrics/pkg/client/clientset/versioned/scheme -k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1 -k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 # k8s.io/mount-utils v0.23.3 => k8s.io/mount-utils v0.26.3 ## explicit; go 1.19 k8s.io/mount-utils
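With the clientset, informer and lister wiring above in place, the new resource can be driven end to end. A hedged sketch of creating a PromotePolicy through the generated typed client; the Go spec field names are inferred from the CRD schema in this patch, and the namespace and policy values are illustrative:

// Sketch only (not part of the patch): create a PromotePolicy with the
// generated typed client. Spec field names are inferred from the CRD schema;
// verify against pkg/apis/kosmos/v1alpha1 before use.
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1"
	versioned "github.com/kosmos.io/kosmos/pkg/generated/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		klog.Fatal(err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		klog.Fatal(err)
	}

	policy := &kosmosv1alpha1.PromotePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "promote-member1", Namespace: "kosmos-system"},
		Spec: kosmosv1alpha1.PromotePolicySpec{
			ClusterName:        "member1",
			IncludedNamespaces: []string{"demo"},
			ExcludedNamespaces: []string{"kube-system"},
		},
	}

	created, err := client.KosmosV1alpha1().PromotePolicies("kosmos-system").Create(context.TODO(), policy, metav1.CreateOptions{})
	if err != nil {
		klog.Fatal(err)
	}
	klog.Infof("created PromotePolicy %s/%s", created.Namespace, created.Name)
}

The controller then reports progress back through the status subresource (phase, precheck errors, progress, backed-up file) using the UpdateStatus method shown earlier.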