diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 545466e932..fc0f5af8b9 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -109,6 +109,7 @@ jobs: "dry-run", "networkcontainer-controller", "networkpeering-controller", + "flex-to-dedicated", ] steps: - uses: actions/checkout@v4 diff --git a/internal/controller/atlas/api_error.go b/internal/controller/atlas/api_error.go index 9db0822ebf..952d53225f 100644 --- a/internal/controller/atlas/api_error.go +++ b/internal/controller/atlas/api_error.go @@ -40,6 +40,7 @@ const ( // ServerlessClusterFromClusterAPI indicates that we are trying to access // a serverless instance from the cluster API, which is not allowed ServerlessInstanceFromClusterAPI = "CANNOT_USE_SERVERLESS_INSTANCE_IN_CLUSTER_API" + FlexFromClusterAPI = "CANNOT_USE_FLEX_CLUSTER_IN_CLUSTER_API" ClusterInstanceFromServerlessAPI = "CANNOT_USE_CLUSTER_IN_SERVERLESS_INSTANCE_API" diff --git a/internal/controller/atlasdeployment/advanced_deployment.go b/internal/controller/atlasdeployment/advanced_deployment.go index 99057bb6b8..6d453aece8 100644 --- a/internal/controller/atlasdeployment/advanced_deployment.go +++ b/internal/controller/atlasdeployment/advanced_deployment.go @@ -41,11 +41,29 @@ import ( const FreeTier = "M0" func (r *AtlasDeploymentReconciler) handleAdvancedDeployment(ctx *workflow.Context, projectService project.ProjectService, deploymentService deployment.AtlasDeploymentsService, akoDeployment, atlasDeployment deployment.Deployment) (ctrl.Result, error) { + if akoDeployment.GetCustomResource().Spec.UpgradeToDedicated && !atlasDeployment.IsDedicated() { + if atlasDeployment.GetState() == status.StateUPDATING { + return r.inProgress(ctx, akoDeployment.GetCustomResource(), atlasDeployment, workflow.DeploymentUpdating, "deployment is updating") + } + + updatedDeployment, err := deploymentService.UpgradeToDedicated(ctx.Context, atlasDeployment, akoDeployment) + + if err != nil { + 
return r.terminate(ctx, workflow.DedicatedMigrationFailed, fmt.Errorf("failed to upgrade cluster: %w", err)) + } + + return r.inProgress(ctx, akoDeployment.GetCustomResource(), updatedDeployment, workflow.DedicatedMigrationProgressing, "Cluster upgrade to dedicated instance initiated in Atlas. The process may take several minutes") + } + akoCluster, ok := akoDeployment.(*deployment.Cluster) if !ok { return r.terminate(ctx, workflow.Internal, errors.New("deployment in AKO is not an advanced cluster")) } - atlasCluster, _ := atlasDeployment.(*deployment.Cluster) + + var atlasCluster *deployment.Cluster + if atlasCluster, ok = atlasDeployment.(*deployment.Cluster); atlasDeployment != nil && !ok { + return r.terminate(ctx, workflow.Internal, errors.New("deployment in Atlas is not an advanced cluster")) + } if atlasCluster == nil { ctx.Log.Infof("Advanced Deployment %s doesn't exist in Atlas - creating", akoCluster.GetName()) diff --git a/internal/controller/atlasdeployment/advanced_deployment_test.go b/internal/controller/atlasdeployment/advanced_deployment_test.go index d34d9bd4a4..d1c9c8b870 100644 --- a/internal/controller/atlasdeployment/advanced_deployment_test.go +++ b/internal/controller/atlasdeployment/advanced_deployment_test.go @@ -17,6 +17,7 @@ package atlasdeployment import ( "context" "errors" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -49,7 +50,7 @@ func TestHandleAdvancedDeployment(t *testing.T) { } tests := map[string]struct { atlasDeployment *akov2.AtlasDeployment - deploymentInAtlas *deployment.Cluster + deploymentInAtlas deployment.Deployment deploymentService func() deployment.AtlasDeploymentsService sdkMock func() *admin.APIClient expectedResult workflowRes @@ -779,6 +780,180 @@ func TestHandleAdvancedDeployment(t *testing.T) { WithMessageRegexp("deployment is updating"), }, }, + "fail to upgrade a shared cluster to dedicated": { + atlasDeployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + UpgradeToDedicated: true, + 
DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + ClusterType: "REPLICASET", + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + RegionName: "US_WEST_1", + Priority: pointer.MakePtr(7), + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M20", + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + }, + }, + deploymentInAtlas: &deployment.Flex{ + ProjectID: "project-id", + State: "IDLE", + FlexSpec: &akov2.FlexSpec{ + Name: "cluster0", + ProviderSettings: &akov2.FlexProviderSettings{ + RegionName: "US_EAST_1", + BackingProviderName: "AWS", + }, + }, + }, + deploymentService: func() deployment.AtlasDeploymentsService { + service := translation.NewAtlasDeploymentsServiceMock(t) + service.EXPECT().UpgradeToDedicated(context.Background(), mock.AnythingOfType("*deployment.Flex"), mock.AnythingOfType("*deployment.Cluster")). + Return(nil, errors.New("failed to update cluster")) + + return service + }, + sdkMock: func() *admin.APIClient { + return &admin.APIClient{} + }, + expectedResult: workflowRes{ + res: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + err: fmt.Errorf("failed to upgrade cluster: %w", errors.New("failed to update cluster")), + }, + expectedConditions: []api.Condition{ + api.FalseCondition(api.DeploymentReadyType). + WithReason(string(workflow.DedicatedMigrationFailed)). 
+ WithMessageRegexp("failed to upgrade cluster: failed to update cluster"), + }, + }, + "watch upgrade progress": { + atlasDeployment: &akov2.AtlasDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster0", + Namespace: "test", + }, + Spec: akov2.AtlasDeploymentSpec{ + UpgradeToDedicated: true, + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + ClusterType: "REPLICASET", + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + RegionName: "US_WEST_1", + Priority: pointer.MakePtr(7), + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M20", + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + }, + }, + deploymentInAtlas: &deployment.Flex{ + ProjectID: "project-id", + State: "UPDATING", + FlexSpec: &akov2.FlexSpec{ + Name: "cluster0", + ProviderSettings: &akov2.FlexProviderSettings{ + RegionName: "US_EAST_1", + BackingProviderName: "AWS", + }, + }, + }, + deploymentService: func() deployment.AtlasDeploymentsService { + service := translation.NewAtlasDeploymentsServiceMock(t) + + return service + }, + sdkMock: func() *admin.APIClient { + return &admin.APIClient{} + }, + expectedResult: workflowRes{ + res: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + err: nil, + }, + expectedConditions: []api.Condition{ + api.FalseCondition(api.DeploymentReadyType). + WithReason(string(workflow.DeploymentUpdating)). 
+ WithMessageRegexp("deployment is updating"), + }, + }, + "upgrade a flex to dedicated cluster": { + atlasDeployment: &akov2.AtlasDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster0", + Namespace: "test", + }, + Spec: akov2.AtlasDeploymentSpec{ + UpgradeToDedicated: true, + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + ClusterType: "REPLICASET", + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + RegionName: "US_WEST_1", + Priority: pointer.MakePtr(7), + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M20", + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + }, + }, + deploymentInAtlas: &deployment.Flex{ + ProjectID: "project-id", + State: "IDLE", + FlexSpec: &akov2.FlexSpec{ + Name: "cluster0", + ProviderSettings: &akov2.FlexProviderSettings{ + RegionName: "US_EAST_1", + BackingProviderName: "AWS", + }, + }, + }, + deploymentService: func() deployment.AtlasDeploymentsService { + service := translation.NewAtlasDeploymentsServiceMock(t) + service.EXPECT().UpgradeToDedicated(context.Background(), mock.AnythingOfType("*deployment.Flex"), mock.AnythingOfType("*deployment.Cluster")). + Return(&deployment.Flex{}, nil) + + return service + }, + sdkMock: func() *admin.APIClient { + return &admin.APIClient{} + }, + expectedResult: workflowRes{ + res: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + err: nil, + }, + expectedConditions: []api.Condition{ + api.FalseCondition(api.DeploymentReadyType). + WithReason(string(workflow.DedicatedMigrationProgressing)). + WithMessageRegexp("Cluster upgrade to dedicated instance initiated in Atlas. 
The process may take several minutes"), + }, + }, } for name, tt := range tests { diff --git a/internal/controller/atlasdeployment/atlasdeployment_controller.go b/internal/controller/atlasdeployment/atlasdeployment_controller.go index 617d44a0d0..92198a711a 100644 --- a/internal/controller/atlasdeployment/atlasdeployment_controller.go +++ b/internal/controller/atlasdeployment/atlasdeployment_controller.go @@ -159,10 +159,10 @@ func (r *AtlasDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Requ deploymentInAKO := deployment.NewDeployment(atlasProject.ID, atlasDeployment) - if ok, deprecationMsg := deploymentInAKO.Deprecated(); ok { + if ok, notificationReason, notificationMsg := deploymentInAKO.Notifications(); ok { // emit Log and event - r.Log.Log(zapcore.WarnLevel, deprecationMsg) - r.EventRecorder.Event(deploymentInAKO.GetCustomResource(), corev1.EventTypeWarning, "DeprecationWarning", deprecationMsg) + r.Log.Log(zapcore.WarnLevel, notificationMsg) + r.EventRecorder.Event(deploymentInAKO.GetCustomResource(), corev1.EventTypeWarning, notificationReason, notificationMsg) } deploymentInAtlas, err := deploymentService.GetDeployment(workflowCtx.Context, atlasProject.ID, atlasDeployment) @@ -173,7 +173,7 @@ func (r *AtlasDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Requ existsInAtlas := deploymentInAtlas != nil if !atlasDeployment.GetDeletionTimestamp().IsZero() { if existsInAtlas { - return r.delete(workflowCtx, deploymentService, deploymentInAKO) + return r.delete(workflowCtx, deploymentService, deploymentInAKO, deploymentInAtlas) } return r.unmanage(workflowCtx, deploymentInAKO) } @@ -195,28 +195,29 @@ func (r *AtlasDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Requ func (r *AtlasDeploymentReconciler) delete( ctx *workflow.Context, deploymentService deployment.AtlasDeploymentsService, - deployment deployment.Deployment, // this must be the original non converted deployment + deploymentInAKO deployment.Deployment, // 
this must be the original non converted deployment + deploymentInAtlas deployment.Deployment, // this must be the original non converted deployment ) (ctrl.Result, error) { - if err := r.cleanupBindings(ctx.Context, deployment); err != nil { + if err := r.cleanupBindings(ctx.Context, deploymentInAKO); err != nil { return r.terminate(ctx, workflow.Internal, fmt.Errorf("failed to cleanup deployment bindings (backups): %w", err)) } switch { - case customresource.IsResourcePolicyKeepOrDefault(deployment.GetCustomResource(), r.ObjectDeletionProtection): + case customresource.IsResourcePolicyKeepOrDefault(deploymentInAKO.GetCustomResource(), r.ObjectDeletionProtection): ctx.Log.Info("Not removing Atlas deployment from Atlas as per configuration") - case customresource.IsResourcePolicyKeep(deployment.GetCustomResource()): + case customresource.IsResourcePolicyKeep(deploymentInAKO.GetCustomResource()): ctx.Log.Infof("Not removing Atlas deployment from Atlas as the '%s' annotation is set", customresource.ResourcePolicyAnnotation) - case isTerminationProtectionEnabled(deployment.GetCustomResource()): - msg := fmt.Sprintf("Termination protection for %s deployment enabled. Deployment in Atlas won't be removed", deployment.GetName()) + case isTerminationProtectionEnabled(deploymentInAKO.GetCustomResource()): + msg := fmt.Sprintf("Termination protection for %s deployment enabled. 
Deployment in Atlas won't be removed", deploymentInAKO.GetName()) ctx.Log.Info(msg) - r.EventRecorder.Event(deployment.GetCustomResource(), "Warning", "AtlasDeploymentTermination", msg) + r.EventRecorder.Event(deploymentInAKO.GetCustomResource(), "Warning", "AtlasDeploymentTermination", msg) default: - if err := r.deleteDeploymentFromAtlas(ctx, deploymentService, deployment); err != nil { + if err := r.deleteDeploymentFromAtlas(ctx, deploymentService, deploymentInAKO, deploymentInAtlas); err != nil { return r.terminate(ctx, workflow.Internal, fmt.Errorf("failed to remove deployment from Atlas: %w", err)) } } - if err := customresource.ManageFinalizer(ctx.Context, r.Client, deployment.GetCustomResource(), customresource.UnsetFinalizer); err != nil { + if err := customresource.ManageFinalizer(ctx.Context, r.Client, deploymentInAKO.GetCustomResource(), customresource.UnsetFinalizer); err != nil { return r.terminate(ctx, workflow.Internal, fmt.Errorf("failed to remove finalizer: %w", err)) } @@ -235,15 +236,20 @@ func isTerminationProtectionEnabled(deployment *akov2.AtlasDeployment) bool { deployment.Spec.ServerlessSpec.TerminationProtectionEnabled) } -func (r *AtlasDeploymentReconciler) deleteDeploymentFromAtlas(ctx *workflow.Context, deploymentService deployment.AtlasDeploymentsService, deployment deployment.Deployment) error { - ctx.Log.Infow("-> Starting AtlasDeployment deletion", "spec", deployment) +func (r *AtlasDeploymentReconciler) deleteDeploymentFromAtlas( + ctx *workflow.Context, + deploymentService deployment.AtlasDeploymentsService, + deploymentInAKO deployment.Deployment, + deploymentInAtlas deployment.Deployment, +) error { + ctx.Log.Infow("-> Starting AtlasDeployment deletion", "spec", deploymentInAKO) - err := r.deleteConnectionStrings(ctx, deployment) + err := r.deleteConnectionStrings(ctx, deploymentInAKO) if err != nil { return err } - err = deploymentService.DeleteDeployment(ctx.Context, deployment) + err = 
deploymentService.DeleteDeployment(ctx.Context, deploymentInAtlas) if err != nil { ctx.Log.Errorw("Cannot delete Atlas deployment", "error", err) return err @@ -349,10 +355,10 @@ func (r *AtlasDeploymentReconciler) ready(ctx *workflow.Context, deploymentInAKO return r.terminate(ctx, workflow.AtlasFinalizerNotSet, err) } - _, deprecationMsg := deploymentInAKO.Deprecated() + _, _, notificationMsg := deploymentInAKO.Notifications() ctx.SetConditionTrue(api.DeploymentReadyType). - SetConditionTrueMsg(api.ReadyType, deprecationMsg). + SetConditionTrueMsg(api.ReadyType, notificationMsg). EnsureStatusOption(status.AtlasDeploymentStateNameOption(deploymentInAtlas.GetState())). EnsureStatusOption(status.AtlasDeploymentReplicaSet(deploymentInAtlas.GetReplicaSet())). EnsureStatusOption(status.AtlasDeploymentMongoDBVersionOption(deploymentInAtlas.GetMongoDBVersion())). diff --git a/internal/controller/atlasdeployment/atlasdeployment_controller_test.go b/internal/controller/atlasdeployment/atlasdeployment_controller_test.go index a4ba18cd64..d7a309db47 100644 --- a/internal/controller/atlasdeployment/atlasdeployment_controller_test.go +++ b/internal/controller/atlasdeployment/atlasdeployment_controller_test.go @@ -464,11 +464,15 @@ func TestRegularClusterReconciliation(t *testing.T) { globalAPI.EXPECT().GetManagedNamespaceExecute(mock.Anything). Return(&admin.GeoSharding20240805{}, nil, nil) - err := &admin.GenericOpenAPIError{} - err.SetModel(admin.ApiError{ErrorCode: atlas.NonFlexInFlexAPI}) - flexAPI := mockadmin.NewFlexClustersApi(t) + serverlessErr := &admin.GenericOpenAPIError{} + serverlessErr.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, project.ID(), mock.Anything). 
+ Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, serverlessErr) + cloudBackupsAPI := mockadmin.NewCloudBackupsApi(t) cloudBackupsAPI.EXPECT().GetBackupSchedule(mock.Anything, project.ID(), d.Spec.DeploymentSpec.Name). Return(admin.GetBackupScheduleApiRequest{ApiService: cloudBackupsAPI}) @@ -502,7 +506,7 @@ func TestRegularClusterReconciliation(t *testing.T) { FlexClustersApi: flexAPI, ClustersApi: clusterAPI, AtlasSearchApi: searchAPI, - ServerlessInstancesApi: mockadmin.NewServerlessInstancesApi(t), + ServerlessInstancesApi: serverlessAPI, GlobalClustersApi: globalAPI, ProjectsApi: projectAPI, CloudBackupsApi: cloudBackupsAPI, @@ -595,9 +599,8 @@ func TestServerlessInstanceReconciliation(t *testing.T) { atlasProvider := &atlasmock.TestProvider{ SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { - err := &admin.GenericOpenAPIError{} - err.SetModel(admin.ApiError{ErrorCode: atlas.ServerlessInstanceFromClusterAPI}) clusterAPI := mockadmin.NewClustersApi(t) + flexAPI := mockadmin.NewFlexClustersApi(t) serverlessAPI := mockadmin.NewServerlessInstancesApi(t) serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, project.ID(), mock.Anything). @@ -634,8 +637,6 @@ func TestServerlessInstanceReconciliation(t *testing.T) { projectAPI.EXPECT().GetProjectByNameExecute(mock.Anything). 
Return(&admin.Group{Id: pointer.MakePtr("abc123")}, nil, nil) - flexAPI := mockadmin.NewFlexClustersApi(t) - return &atlas.ClientSet{ SdkClient20250312002: &admin.APIClient{ FlexClustersApi: flexAPI, @@ -753,8 +754,20 @@ func TestFlexClusterReconciliation(t *testing.T) { nil, ) + clusterErr := &admin.GenericOpenAPIError{} + clusterErr.SetModel(admin.ApiError{ErrorCode: atlas.FlexFromClusterAPI}) clusterAPI := mockadmin.NewClustersApi(t) + clusterAPI.EXPECT().GetCluster(mock.Anything, project.ID(), mock.Anything). + Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) + clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). + Return(nil, nil, clusterErr) + + serverlessErr := &admin.GenericOpenAPIError{} + serverlessErr.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, project.ID(), mock.Anything). + Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, serverlessErr) projectAPI := mockadmin.NewProjectsApi(t) projectAPI.EXPECT().GetProjectByName(mock.Anything, "MyProject"). @@ -898,11 +911,15 @@ func TestDeletionReconciliation(t *testing.T) { logger := zaptest.NewLogger(t).Sugar() atlasProvider := &atlasmock.TestProvider{ SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { - err := &admin.GenericOpenAPIError{} - err.SetModel(admin.ApiError{ErrorCode: atlas.NonFlexInFlexAPI}) - flexAPI := mockadmin.NewFlexClustersApi(t) + serverlessErr := &admin.GenericOpenAPIError{} + serverlessErr.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, project.ID(), mock.Anything). 
+ Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, serverlessErr) + clusterAPI := mockadmin.NewClustersApi(t) clusterAPI.EXPECT().GetCluster(mock.Anything, project.ID(), mock.Anything). Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) @@ -949,7 +966,7 @@ func TestDeletionReconciliation(t *testing.T) { SdkClient20250312002: &admin.APIClient{ FlexClustersApi: flexAPI, ClustersApi: clusterAPI, - ServerlessInstancesApi: mockadmin.NewServerlessInstancesApi(t), + ServerlessInstancesApi: serverlessAPI, ProjectsApi: projectAPI, }, }, nil @@ -1248,8 +1265,9 @@ func TestChangeDeploymentType(t *testing.T) { tests := map[string]struct { deployment *akov2.AtlasDeployment atlasProvider atlas.Provider + errorMsg string }{ - "should fail when existing cluster is regular but manifest defines a serverless instance": { + "should fail when existing cluster is regular but manifest defines a serverless instance": { //nolint:dupl deployment: &akov2.AtlasDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster0", @@ -1274,7 +1292,102 @@ func TestChangeDeploymentType(t *testing.T) { StateName: "IDLE", }, }, + //nolint:dupl + atlasProvider: &atlasmock.TestProvider{ + IsCloudGovFunc: func() bool { + return false + }, + IsSupportedFunc: func() bool { + return true + }, + ClientFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*mongodbatlas.Client, error) { + return &mongodbatlas.Client{}, nil + }, + SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { + flexAPI := mockadmin.NewFlexClustersApi(t) + + serverlessErr := &admin.GenericOpenAPIError{} + serverlessErr.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "abc123", 
mock.Anything). + Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, serverlessErr) + + clusterAPI := mockadmin.NewClustersApi(t) + clusterAPI.EXPECT().GetCluster(mock.Anything, "abc123", mock.Anything). + Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) + clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). + Return( + &admin.ClusterDescription20240805{ + GroupId: pointer.MakePtr("abc123"), + Name: pointer.MakePtr("cluster0"), + ClusterType: pointer.MakePtr("REPLICASET"), + BackupEnabled: pointer.MakePtr(true), + StateName: pointer.MakePtr("IDLE"), + ReplicationSpecs: &[]admin.ReplicationSpec20240805{ + { + ZoneName: pointer.MakePtr("Zone 1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + ProviderName: pointer.MakePtr("AWS"), + RegionName: pointer.MakePtr("US_EAST_1"), + Priority: pointer.MakePtr(7), + ElectableSpecs: &admin.HardwareSpec20240805{ + InstanceSize: pointer.MakePtr("M10"), + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + &http.Response{}, + nil, + ) + projectAPI := mockadmin.NewProjectsApi(t) + projectAPI.EXPECT().GetProjectByName(mock.Anything, "MyProject"). + Return(admin.GetProjectByNameApiRequest{ApiService: projectAPI}) + projectAPI.EXPECT().GetProjectByNameExecute(mock.Anything). 
+ Return(&admin.Group{Id: pointer.MakePtr("abc123")}, nil, nil) + + return &atlas.ClientSet{ + SdkClient20250312002: &admin.APIClient{ + FlexClustersApi: flexAPI, + ServerlessInstancesApi: serverlessAPI, + ClustersApi: clusterAPI, + ProjectsApi: projectAPI, + }, + }, nil + }, + }, + errorMsg: "deployment in Atlas is not a serverless cluster", + }, + "should fail when existing cluster is regular but manifest defines a flex instance": { //nolint:dupl + deployment: &akov2.AtlasDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster0", + Namespace: "default", + }, + Spec: akov2.AtlasDeploymentSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-project", + Namespace: "default", + }, + }, + FlexSpec: &akov2.FlexSpec{ + Name: "cluster0", + ProviderSettings: &akov2.FlexProviderSettings{ + BackingProviderName: "AWS", + RegionName: "US_EAST_1", + }, + }, + }, + Status: status.AtlasDeploymentStatus{ + StateName: "IDLE", + }, + }, //nolint:dupl atlasProvider: &atlasmock.TestProvider{ IsCloudGovFunc: func() bool { @@ -1287,14 +1400,46 @@ func TestChangeDeploymentType(t *testing.T) { return &mongodbatlas.Client{}, nil }, SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { + flexAPI := mockadmin.NewFlexClustersApi(t) + + serverlessErr := &admin.GenericOpenAPIError{} + serverlessErr.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) serverlessAPI := mockadmin.NewServerlessInstancesApi(t) - serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "abc123", "cluster0"). + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "abc123", mock.Anything). 
Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, serverlessErr) - err := &admin.GenericOpenAPIError{} - err.SetModel(admin.ApiError{ErrorCode: atlas.ClusterInstanceFromServerlessAPI}) - err.SetError("wrong API") - serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, err) + clusterAPI := mockadmin.NewClustersApi(t) + clusterAPI.EXPECT().GetCluster(mock.Anything, "abc123", mock.Anything). + Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) + clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). + Return( + &admin.ClusterDescription20240805{ + GroupId: pointer.MakePtr("abc123"), + Name: pointer.MakePtr("cluster0"), + ClusterType: pointer.MakePtr("REPLICASET"), + BackupEnabled: pointer.MakePtr(true), + StateName: pointer.MakePtr("IDLE"), + ReplicationSpecs: &[]admin.ReplicationSpec20240805{ + { + ZoneName: pointer.MakePtr("Zone 1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + ProviderName: pointer.MakePtr("AWS"), + RegionName: pointer.MakePtr("US_EAST_1"), + Priority: pointer.MakePtr(7), + ElectableSpecs: &admin.HardwareSpec20240805{ + InstanceSize: pointer.MakePtr("M10"), + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + &http.Response{}, + nil, + ) projectAPI := mockadmin.NewProjectsApi(t) projectAPI.EXPECT().GetProjectByName(mock.Anything, "MyProject"). 
@@ -1304,12 +1449,15 @@ func TestChangeDeploymentType(t *testing.T) { return &atlas.ClientSet{ SdkClient20250312002: &admin.APIClient{ + FlexClustersApi: flexAPI, ServerlessInstancesApi: serverlessAPI, + ClustersApi: clusterAPI, ProjectsApi: projectAPI, }, }, nil }, }, + errorMsg: "deployment in Atlas is not a flex cluster", }, "should fail when existing cluster is serverless instance but manifest defines a regular deployment": { deployment: &akov2.AtlasDeployment{ @@ -1345,13 +1493,29 @@ func TestChangeDeploymentType(t *testing.T) { }, SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { clusterAPI := mockadmin.NewClustersApi(t) - clusterAPI.EXPECT().GetCluster(mock.Anything, "abc123", "cluster0"). - Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) + flexAPI := mockadmin.NewFlexClustersApi(t) - err := &admin.GenericOpenAPIError{} - err.SetModel(admin.ApiError{ErrorCode: atlas.ServerlessInstanceFromClusterAPI}) - err.SetError("wrong API") - clusterAPI.EXPECT().GetClusterExecute(mock.Anything).Return(nil, nil, err) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "abc123", mock.Anything). 
+ Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return( + &admin.ServerlessInstanceDescription{ + GroupId: pointer.MakePtr("abc123"), + Name: pointer.MakePtr("cluster0"), + ProviderSettings: admin.ServerlessProviderSettings{ + BackingProviderName: "AWS", + ProviderName: pointer.MakePtr("SERVERLESS"), + RegionName: "US_EAST_1", + }, + ServerlessBackupOptions: &admin.ClusterServerlessBackupOptions{ + ServerlessContinuousBackupEnabled: pointer.MakePtr(false), + }, + StateName: pointer.MakePtr("IDLE"), + TerminationProtectionEnabled: pointer.MakePtr(false), + }, + nil, + nil, + ) projectAPI := mockadmin.NewProjectsApi(t) projectAPI.EXPECT().GetProjectByName(mock.Anything, "MyProject"). @@ -1361,12 +1525,15 @@ func TestChangeDeploymentType(t *testing.T) { return &atlas.ClientSet{ SdkClient20250312002: &admin.APIClient{ - ClustersApi: clusterAPI, - ProjectsApi: projectAPI, + ClustersApi: clusterAPI, + FlexClustersApi: flexAPI, + ServerlessInstancesApi: serverlessAPI, + ProjectsApi: projectAPI, }, }, nil }, }, + errorMsg: "deployment in Atlas is not an advanced cluster", }, } @@ -1446,7 +1613,7 @@ func TestChangeDeploymentType(t *testing.T) { api.TrueCondition(api.ValidationSucceeded), api.FalseCondition(api.DeploymentReadyType). WithReason(string(workflow.Internal)). 
- WithMessageRegexp("wrong API"), + WithMessageRegexp(tt.errorMsg), }, tt.deployment.Status.Conditions, cmpopts.IgnoreFields(api.Condition{}, "LastTransitionTime"), diff --git a/internal/controller/atlasdeployment/flex_deployment.go b/internal/controller/atlasdeployment/flex_deployment.go index 6945a6cc22..ae2522cde5 100644 --- a/internal/controller/atlasdeployment/flex_deployment.go +++ b/internal/controller/atlasdeployment/flex_deployment.go @@ -32,9 +32,13 @@ func (r *AtlasDeploymentReconciler) handleFlexInstance(ctx *workflow.Context, pr deploymentService deployment.AtlasDeploymentsService, akoDeployment, atlasDeployment deployment.Deployment) (ctrl.Result, error) { akoFlex, ok := akoDeployment.(*deployment.Flex) if !ok { - return r.terminate(ctx, workflow.Internal, errors.New("deployment in AKO is not a serverless cluster")) + return r.terminate(ctx, workflow.Internal, errors.New("deployment in AKO is not a flex cluster")) + } + + var atlasFlex *deployment.Flex + if atlasFlex, ok = atlasDeployment.(*deployment.Flex); atlasDeployment != nil && !ok { + return r.terminate(ctx, workflow.Internal, errors.New("deployment in Atlas is not a flex cluster")) } - atlasFlex, _ := atlasDeployment.(*deployment.Flex) if atlasFlex == nil { ctx.Log.Infof("Flex Instance %s doesn't exist in Atlas - creating", akoFlex.GetName()) diff --git a/internal/controller/atlasdeployment/serverless_deployment.go b/internal/controller/atlasdeployment/serverless_deployment.go index 10ed226800..420e092bac 100644 --- a/internal/controller/atlasdeployment/serverless_deployment.go +++ b/internal/controller/atlasdeployment/serverless_deployment.go @@ -34,7 +34,11 @@ func (r *AtlasDeploymentReconciler) handleServerlessInstance(ctx *workflow.Conte if !ok { return r.terminate(ctx, workflow.Internal, errors.New("deployment in AKO is not a serverless cluster")) } - atlasServerless, _ := atlasDeployment.(*deployment.Serverless) + + var atlasServerless *deployment.Serverless + if atlasServerless, ok = 
atlasDeployment.(*deployment.Serverless); atlasDeployment != nil && !ok { + return r.terminate(ctx, workflow.Internal, errors.New("deployment in Atlas is not a serverless cluster")) + } if atlasServerless == nil { ctx.Log.Infof("Serverless Instance %s doesn't exist in Atlas - creating", akoServerless.GetName()) diff --git a/internal/controller/workflow/reason.go b/internal/controller/workflow/reason.go index 19f3e76b02..c56809ea06 100644 --- a/internal/controller/workflow/reason.go +++ b/internal/controller/workflow/reason.go @@ -78,6 +78,8 @@ const ( DeploymentUpdating ConditionReason = "DeploymentUpdating" DeploymentConnectionSecretsNotCreated ConditionReason = "DeploymentConnectionSecretsNotCreated" DeploymentAdvancedOptionsReady ConditionReason = "DeploymentAdvancedOptionsReady" + DedicatedMigrationProgressing ConditionReason = "DedicatedMigrationProgressing" + DedicatedMigrationFailed ConditionReason = "DedicatedMigrationFailed" ServerlessPrivateEndpointReady ConditionReason = "ServerlessPrivateEndpointReady" ServerlessPrivateEndpointFailed ConditionReason = "ServerlessPrivateEndpointFailed" ServerlessPrivateEndpointInProgress ConditionReason = "ServerlessPrivateEndpointInProgress" diff --git a/internal/mocks/translation/atlas_deployments_service.go b/internal/mocks/translation/atlas_deployments_service.go index 9b0af5d355..62ded04514 100644 --- a/internal/mocks/translation/atlas_deployments_service.go +++ b/internal/mocks/translation/atlas_deployments_service.go @@ -964,9 +964,9 @@ func (_c *AtlasDeploymentsServiceMock_UpdateProcessArgs_Call) RunAndReturn(run f return _c } -// UpgradeToDedicated provides a mock function with given fields: ctx, _a1 -func (_m *AtlasDeploymentsServiceMock) UpgradeToDedicated(ctx context.Context, _a1 deployment.Deployment) (deployment.Deployment, error) { - ret := _m.Called(ctx, _a1) +// UpgradeToDedicated provides a mock function with given fields: ctx, currentDeployment, targetDeployment +func (_m *AtlasDeploymentsServiceMock) 
UpgradeToDedicated(ctx context.Context, currentDeployment deployment.Deployment, targetDeployment deployment.Deployment) (deployment.Deployment, error) { + ret := _m.Called(ctx, currentDeployment, targetDeployment) if len(ret) == 0 { panic("no return value specified for UpgradeToDedicated") @@ -974,19 +974,19 @@ func (_m *AtlasDeploymentsServiceMock) UpgradeToDedicated(ctx context.Context, _ var r0 deployment.Deployment var r1 error - if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment) (deployment.Deployment, error)); ok { - return rf(ctx, _a1) + if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment, deployment.Deployment) (deployment.Deployment, error)); ok { + return rf(ctx, currentDeployment, targetDeployment) } - if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment) deployment.Deployment); ok { - r0 = rf(ctx, _a1) + if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment, deployment.Deployment) deployment.Deployment); ok { + r0 = rf(ctx, currentDeployment, targetDeployment) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(deployment.Deployment) } } - if rf, ok := ret.Get(1).(func(context.Context, deployment.Deployment) error); ok { - r1 = rf(ctx, _a1) + if rf, ok := ret.Get(1).(func(context.Context, deployment.Deployment, deployment.Deployment) error); ok { + r1 = rf(ctx, currentDeployment, targetDeployment) } else { r1 = ret.Error(1) } @@ -1001,14 +1001,15 @@ type AtlasDeploymentsServiceMock_UpgradeToDedicated_Call struct { // UpgradeToDedicated is a helper method to define mock.On call // - ctx context.Context -// - _a1 deployment.Deployment -func (_e *AtlasDeploymentsServiceMock_Expecter) UpgradeToDedicated(ctx interface{}, _a1 interface{}) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { - return &AtlasDeploymentsServiceMock_UpgradeToDedicated_Call{Call: _e.mock.On("UpgradeToDedicated", ctx, _a1)} +// - currentDeployment deployment.Deployment +// - targetDeployment deployment.Deployment 
+func (_e *AtlasDeploymentsServiceMock_Expecter) UpgradeToDedicated(ctx interface{}, currentDeployment interface{}, targetDeployment interface{}) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { + return &AtlasDeploymentsServiceMock_UpgradeToDedicated_Call{Call: _e.mock.On("UpgradeToDedicated", ctx, currentDeployment, targetDeployment)} } -func (_c *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call) Run(run func(ctx context.Context, _a1 deployment.Deployment)) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { +func (_c *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call) Run(run func(ctx context.Context, currentDeployment deployment.Deployment, targetDeployment deployment.Deployment)) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(deployment.Deployment)) + run(args[0].(context.Context), args[1].(deployment.Deployment), args[2].(deployment.Deployment)) }) return _c } @@ -1018,7 +1019,7 @@ func (_c *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call) Return(_a0 deploy return _c } -func (_c *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call) RunAndReturn(run func(context.Context, deployment.Deployment) (deployment.Deployment, error)) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { +func (_c *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call) RunAndReturn(run func(context.Context, deployment.Deployment, deployment.Deployment) (deployment.Deployment, error)) *AtlasDeploymentsServiceMock_UpgradeToDedicated_Call { _c.Call.Return(run) return _c } diff --git a/internal/mocks/translation/deployment.go b/internal/mocks/translation/deployment.go index 14bc172e51..a620bb8ca4 100644 --- a/internal/mocks/translation/deployment.go +++ b/internal/mocks/translation/deployment.go @@ -27,7 +27,7 @@ func (_m *DeploymentMock) Deprecated() (bool, string) { ret := _m.Called() if len(ret) == 0 { - panic("no return value specified for Deprecated") + panic("no return value 
specified for Deprecated") } var r0 bool @@ -57,7 +57,7 @@ type DeploymentMock_Deprecated_Call struct { // Deprecated is a helper method to define mock.On call func (_e *DeploymentMock_Expecter) Deprecated() *DeploymentMock_Deprecated_Call { - return &DeploymentMock_Deprecated_Call{Call: _e.mock.On("Deprecated")} + return &DeploymentMock_Deprecated_Call{Call: _e.mock.On("Deprecated")} } func (_c *DeploymentMock_Deprecated_Call) Run(run func()) *DeploymentMock_Deprecated_Call { diff --git a/internal/mocks/translation/deployment_service.go b/internal/mocks/translation/deployment_service.go index 28bc4041e3..2c2c7362e4 100644 --- a/internal/mocks/translation/deployment_service.go +++ b/internal/mocks/translation/deployment_service.go @@ -577,9 +577,9 @@ func (_c *DeploymentServiceMock_UpdateProcessArgs_Call) RunAndReturn(run func(co return _c } -// UpgradeToDedicated provides a mock function with given fields: ctx, _a1 -func (_m *DeploymentServiceMock) UpgradeToDedicated(ctx context.Context, _a1 deployment.Deployment) (deployment.Deployment, error) { - ret := _m.Called(ctx, _a1) +// UpgradeToDedicated provides a mock function with given fields: ctx, currentDeployment, targetDeployment +func (_m *DeploymentServiceMock) UpgradeToDedicated(ctx context.Context, currentDeployment deployment.Deployment, targetDeployment deployment.Deployment) (deployment.Deployment, error) { + ret := _m.Called(ctx, currentDeployment, targetDeployment) if len(ret) == 0 { panic("no return value specified for UpgradeToDedicated") @@ -587,19 +587,19 @@ func (_m *DeploymentServiceMock) UpgradeToDedicated(ctx context.Context, _a1 dep var r0 deployment.Deployment var r1 error - if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment) (deployment.Deployment, error)); ok { - return rf(ctx, _a1) + if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment, deployment.Deployment) (deployment.Deployment, error)); ok { + return rf(ctx, currentDeployment, targetDeployment) } 
- if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment) deployment.Deployment); ok { - r0 = rf(ctx, _a1) + if rf, ok := ret.Get(0).(func(context.Context, deployment.Deployment, deployment.Deployment) deployment.Deployment); ok { + r0 = rf(ctx, currentDeployment, targetDeployment) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(deployment.Deployment) } } - if rf, ok := ret.Get(1).(func(context.Context, deployment.Deployment) error); ok { - r1 = rf(ctx, _a1) + if rf, ok := ret.Get(1).(func(context.Context, deployment.Deployment, deployment.Deployment) error); ok { + r1 = rf(ctx, currentDeployment, targetDeployment) } else { r1 = ret.Error(1) } @@ -614,14 +614,15 @@ type DeploymentServiceMock_UpgradeToDedicated_Call struct { // UpgradeToDedicated is a helper method to define mock.On call // - ctx context.Context -// - _a1 deployment.Deployment -func (_e *DeploymentServiceMock_Expecter) UpgradeToDedicated(ctx interface{}, _a1 interface{}) *DeploymentServiceMock_UpgradeToDedicated_Call { - return &DeploymentServiceMock_UpgradeToDedicated_Call{Call: _e.mock.On("UpgradeToDedicated", ctx, _a1)} +// - currentDeployment deployment.Deployment +// - targetDeployment deployment.Deployment +func (_e *DeploymentServiceMock_Expecter) UpgradeToDedicated(ctx interface{}, currentDeployment interface{}, targetDeployment interface{}) *DeploymentServiceMock_UpgradeToDedicated_Call { + return &DeploymentServiceMock_UpgradeToDedicated_Call{Call: _e.mock.On("UpgradeToDedicated", ctx, currentDeployment, targetDeployment)} } -func (_c *DeploymentServiceMock_UpgradeToDedicated_Call) Run(run func(ctx context.Context, _a1 deployment.Deployment)) *DeploymentServiceMock_UpgradeToDedicated_Call { +func (_c *DeploymentServiceMock_UpgradeToDedicated_Call) Run(run func(ctx context.Context, currentDeployment deployment.Deployment, targetDeployment deployment.Deployment)) *DeploymentServiceMock_UpgradeToDedicated_Call { _c.Call.Run(func(args mock.Arguments) { - 
run(args[0].(context.Context), args[1].(deployment.Deployment)) + run(args[0].(context.Context), args[1].(deployment.Deployment), args[2].(deployment.Deployment)) }) return _c } @@ -631,7 +632,7 @@ func (_c *DeploymentServiceMock_UpgradeToDedicated_Call) Return(_a0 deployment.D return _c } -func (_c *DeploymentServiceMock_UpgradeToDedicated_Call) RunAndReturn(run func(context.Context, deployment.Deployment) (deployment.Deployment, error)) *DeploymentServiceMock_UpgradeToDedicated_Call { +func (_c *DeploymentServiceMock_UpgradeToDedicated_Call) RunAndReturn(run func(context.Context, deployment.Deployment, deployment.Deployment) (deployment.Deployment, error)) *DeploymentServiceMock_UpgradeToDedicated_Call { _c.Call.Return(run) return _c } diff --git a/internal/translation/deployment/conversion.go b/internal/translation/deployment/conversion.go index dcac50338b..6b2f06c83a 100644 --- a/internal/translation/deployment/conversion.go +++ b/internal/translation/deployment/conversion.go @@ -30,6 +30,9 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/tag" ) +const NOTIFICATION_REASON_DEPRECATION = "DeprecationWarning" +const NOTIFICATION_REASON_RECOMMENDATION = "RecommendationWarning" + type Deployment interface { GetName() string GetProjectID() string @@ -40,7 +43,9 @@ type Deployment interface { GetReplicaSet() []status.ReplicaSet IsServerless() bool IsFlex() bool - Deprecated() (bool, string) + IsTenant() bool + IsDedicated() bool + Notifications() (bool, string, string) } type Cluster struct { @@ -99,7 +104,11 @@ func (c *Cluster) IsTenant() bool { return c.isTenant } -func (c *Cluster) Deprecated() (bool, string) { +func (c *Cluster) IsDedicated() bool { + return !c.IsTenant() +} + +func (c *Cluster) Notifications() (bool, string, string) { for _, replicationSpec := range c.ReplicationSpecs { if replicationSpec == nil { continue @@ -113,19 +122,25 @@ func (c *Cluster) Deprecated() (bool, string) { if 
deprecatedSpecs(regionConfig.ElectableSpecs) || deprecatedSpecs(regionConfig.ReadOnlySpecs) || deprecatedSpecs(regionConfig.AnalyticsSpecs) { - return true, "WARNING: M2 and M5 instance sizes are deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details." + return true, NOTIFICATION_REASON_DEPRECATION, "WARNING: M2 and M5 instance sizes are deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details." } } } - if c.ProcessArgs != nil { - if c.ProcessArgs.DefaultReadConcern != "" { - return true, "Process Arg DefaultReadConcern is no longer available in Atlas. Setting this will have no effect." + + processArgs := c.customResource.Spec.ProcessArgs + if processArgs != nil { + if processArgs.DefaultReadConcern != "" { + return true, NOTIFICATION_REASON_DEPRECATION, "Process Arg DefaultReadConcern is no longer available in Atlas. Setting this will have no effect." } - if c.ProcessArgs.FailIndexKeyTooLong != nil { - return true, "Process Arg FailIndexKeyTooLong is no longer available in Atlas. Setting this will have no effect." + if processArgs.FailIndexKeyTooLong != nil { + return true, NOTIFICATION_REASON_DEPRECATION, "Process Arg FailIndexKeyTooLong is no longer available in Atlas. Setting this will have no effect." } } - return false, "" + + if c.IsDedicated() && c.customResource.Spec.UpgradeToDedicated { + return true, NOTIFICATION_REASON_RECOMMENDATION, "Cluster is already dedicated. It’s recommended to remove or set the upgrade flag to false" + } + return false, "", "" } func deprecatedSpecs(specs *akov2.Specs) bool { @@ -184,8 +199,16 @@ func (s *Serverless) IsFlex() bool { return false } -func (s *Serverless) Deprecated() (bool, string) { - return true, "WARNING: Serverless is deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details." 
+func (s *Serverless) IsTenant() bool { + return false +} + +func (s *Serverless) IsDedicated() bool { + return false +} + +func (s *Serverless) Notifications() (bool, string, string) { + return true, NOTIFICATION_REASON_DEPRECATION, "WARNING: Serverless is deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details." } type Flex struct { @@ -234,8 +257,16 @@ func (f *Flex) IsFlex() bool { return true } -func (f *Flex) Deprecated() (bool, string) { - return false, "" +func (f *Flex) IsTenant() bool { + return false +} + +func (f *Flex) IsDedicated() bool { + return false +} + +func (f *Flex) Notifications() (bool, string, string) { + return false, "", "" } type Connection struct { @@ -1245,21 +1276,22 @@ func flexUpdateToAtlas(flex *Flex) *admin.FlexClusterDescriptionUpdate20241113 { } } -func flexUpgradeToAtlas(flex *Flex) *admin.AtlasTenantClusterUpgradeRequest20240805 { - cluster := flex.GetCustomResource().Spec.DeploymentSpec +func flexUpgradeToAtlas(cluster *Cluster) *admin.AtlasTenantClusterUpgradeRequest20240805 { + spec := cluster.GetCustomResource().Spec.DeploymentSpec return &admin.AtlasTenantClusterUpgradeRequest20240805{ - ClusterType: pointer.MakePtrOrNil(cluster.ClusterType), - MongoDBMajorVersion: pointer.MakePtrOrNil(cluster.MongoDBMajorVersion), - VersionReleaseSystem: pointer.MakePtrOrNil(cluster.VersionReleaseSystem), - BackupEnabled: cluster.BackupEnabled, - BiConnector: biConnectToAtlas(cluster.BiConnector), - EncryptionAtRestProvider: pointer.MakePtrOrNil(cluster.EncryptionAtRestProvider), - Labels: labelsToAtlas(cluster.Labels), - Paused: cluster.Paused, - PitEnabled: cluster.PitEnabled, - ReplicationSpecs: replicationSpecToAtlas(cluster.ReplicationSpecs, cluster.ClusterType, cluster.DiskSizeGB), - RootCertType: pointer.MakePtrOrNil(cluster.RootCertType), - Tags: tag.ToAtlas(cluster.Tags), - TerminationProtectionEnabled: pointer.MakePtrOrNil(cluster.TerminationProtectionEnabled), + Name: spec.Name, + ClusterType: 
pointer.MakePtrOrNil(spec.ClusterType), + MongoDBMajorVersion: pointer.MakePtrOrNil(spec.MongoDBMajorVersion), + VersionReleaseSystem: pointer.MakePtrOrNil(spec.VersionReleaseSystem), + BackupEnabled: spec.BackupEnabled, + BiConnector: biConnectToAtlas(spec.BiConnector), + EncryptionAtRestProvider: pointer.MakePtrOrNil(spec.EncryptionAtRestProvider), + Labels: labelsToAtlas(spec.Labels), + Paused: spec.Paused, + PitEnabled: spec.PitEnabled, + ReplicationSpecs: replicationSpecToAtlas(spec.ReplicationSpecs, spec.ClusterType, spec.DiskSizeGB), + RootCertType: pointer.MakePtrOrNil(spec.RootCertType), + Tags: tag.ToAtlas(spec.Tags), + TerminationProtectionEnabled: pointer.MakePtrOrNil(spec.TerminationProtectionEnabled), } } diff --git a/internal/translation/deployment/conversion_test.go b/internal/translation/deployment/conversion_test.go index 448739158c..26a6ce1629 100644 --- a/internal/translation/deployment/conversion_test.go +++ b/internal/translation/deployment/conversion_test.go @@ -883,6 +883,7 @@ func TestDeprecated(t *testing.T) { name string deployment *akov2.AtlasDeployment wantDeprecated bool + wantReason string wantMsg string }{ { @@ -1035,6 +1036,7 @@ func TestDeprecated(t *testing.T) { }, }, wantDeprecated: true, + wantReason: NOTIFICATION_REASON_DEPRECATION, wantMsg: "WARNING: M2 and M5 instance sizes are deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details.", }, { @@ -1066,8 +1068,37 @@ func TestDeprecated(t *testing.T) { }, }, wantDeprecated: true, + wantReason: NOTIFICATION_REASON_DEPRECATION, wantMsg: "WARNING: M2 and M5 instance sizes are deprecated. 
See https://dochub.mongodb.org/core/atlas-flex-migration for details.", }, + { + name: "default read concern set", + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + DeploymentSpec: &akov2.AdvancedDeploymentSpec{}, + ProcessArgs: &akov2.ProcessArgs{ + DefaultReadConcern: "true", + }, + }, + }, + wantDeprecated: true, + wantReason: NOTIFICATION_REASON_DEPRECATION, + wantMsg: "Process Arg DefaultReadConcern is no longer available in Atlas. Setting this will have no effect.", + }, + { + name: "fail index key too long set", + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + DeploymentSpec: &akov2.AdvancedDeploymentSpec{}, + ProcessArgs: &akov2.ProcessArgs{ + FailIndexKeyTooLong: pointer.MakePtr(true), + }, + }, + }, + wantDeprecated: true, + wantReason: NOTIFICATION_REASON_DEPRECATION, + wantMsg: "Process Arg FailIndexKeyTooLong is no longer available in Atlas. Setting this will have no effect.", + }, { name: "empty serverless instance", deployment: &akov2.AtlasDeployment{ @@ -1076,14 +1107,138 @@ func TestDeprecated(t *testing.T) { }, }, wantDeprecated: true, + wantReason: NOTIFICATION_REASON_DEPRECATION, wantMsg: "WARNING: Serverless is deprecated. See https://dochub.mongodb.org/core/atlas-flex-migration for details.", }, + { + name: "remove upgrade flag", + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + UpgradeToDedicated: true, + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + ClusterType: "REPLICASET", + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + RegionName: "US_EAST_1", + Priority: pointer.MakePtr(7), + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M10", + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + }, + }, + }, + wantDeprecated: true, + wantReason: NOTIFICATION_REASON_RECOMMENDATION, + wantMsg: "Cluster is already dedicated. 
It’s recommended to remove or set the upgrade flag to false", + }, } { t.Run(tc.name, func(t *testing.T) { d := NewDeployment("123", tc.deployment) - gotDeprecated, gotMsg := d.Deprecated() + gotDeprecated, gotReason, gotMsg := d.Notifications() require.Equal(t, tc.wantDeprecated, gotDeprecated) + require.Equal(t, tc.wantReason, gotReason) require.Equal(t, tc.wantMsg, gotMsg) }) } } + +func TestIsType(t *testing.T) { + tests := map[string]struct { + deployment *akov2.AtlasDeployment + wantServerless bool + wantFlex bool + wantTenant bool + wantDedicated bool + }{ + "Cluster is serverless": { + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + ServerlessSpec: &akov2.ServerlessSpec{}, + }, + }, + wantServerless: true, + wantFlex: false, + wantTenant: false, + wantDedicated: false, + }, + "Cluster is flex": { + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + FlexSpec: &akov2.FlexSpec{}, + }, + }, + wantServerless: false, + wantFlex: true, + wantTenant: false, + wantDedicated: false, + }, + "Cluster is tenant": { + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "TENANT", + BackingProviderName: "AWS", + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M0", + }, + }, + }, + }, + }, + }, + }, + }, + wantServerless: false, + wantFlex: false, + wantTenant: true, + wantDedicated: false, + }, + "Cluster is dedicated": { + deployment: &akov2.AtlasDeployment{ + Spec: akov2.AtlasDeploymentSpec{ + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M10", + }, + }, + }, + }, + }, + }, + }, + }, + wantServerless: false, + wantFlex: false, + wantTenant: false, + 
wantDedicated: true, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + d := NewDeployment("123", tt.deployment) + assert.Equal(t, tt.wantServerless, d.IsServerless()) + assert.Equal(t, tt.wantFlex, d.IsFlex()) + assert.Equal(t, tt.wantTenant, d.IsTenant()) + assert.Equal(t, tt.wantDedicated, d.IsDedicated()) + }) + } +} diff --git a/internal/translation/deployment/deployment.go b/internal/translation/deployment/deployment.go index 3a0b909724..019f823357 100644 --- a/internal/translation/deployment/deployment.go +++ b/internal/translation/deployment/deployment.go @@ -42,7 +42,7 @@ type DeploymentService interface { CreateDeployment(ctx context.Context, deployment Deployment) (Deployment, error) UpdateDeployment(ctx context.Context, deployment Deployment) (Deployment, error) DeleteDeployment(ctx context.Context, deployment Deployment) error - UpgradeToDedicated(ctx context.Context, deployment Deployment) (Deployment, error) + UpgradeToDedicated(ctx context.Context, currentDeployment, targetDeployment Deployment) (Deployment, error) ClusterWithProcessArgs(ctx context.Context, cluster *Cluster) error UpdateProcessArgs(ctx context.Context, cluster *Cluster) error } @@ -225,33 +225,28 @@ func (ds *ProductionAtlasDeployments) GetDeployment(ctx context.Context, project return nil, errors.New("deployment is nil") } - switch { - case deployment.IsFlex(): - flex, err := ds.GetFlexCluster(ctx, projectID, deployment.GetDeploymentName()) - if err != nil { - return nil, err - } - if flex != nil { - return flex, err - } + serverless, err := ds.GetServerless(ctx, projectID, deployment.GetDeploymentName()) + if !admin.IsErrorCode(err, atlas.ClusterInstanceFromServerlessAPI) && err != nil { + return nil, err + } + if serverless != nil { + return serverless, nil + } - case deployment.IsServerless(): - serverless, err := ds.GetServerless(ctx, projectID, deployment.GetDeploymentName()) - if err != nil { - return nil, err - } - if serverless != nil { - 
return serverless, err - } + cluster, err := ds.GetCluster(ctx, projectID, deployment.GetDeploymentName()) + if !admin.IsErrorCode(err, atlas.ServerlessInstanceFromClusterAPI) && !admin.IsErrorCode(err, atlas.FlexFromClusterAPI) && err != nil { + return nil, err + } + if cluster != nil { + return cluster, nil + } - case deployment.IsAdvancedDeployment(): - cluster, err := ds.GetCluster(ctx, projectID, deployment.GetDeploymentName()) - if err != nil { - return nil, err - } - if cluster != nil { - return cluster, err - } + flex, err := ds.GetFlexCluster(ctx, projectID, deployment.GetDeploymentName()) + if !admin.IsErrorCode(err, atlas.NonFlexInFlexAPI) && err != nil { + return nil, err + } + if flex != nil { + return flex, nil } // not found @@ -336,14 +331,15 @@ func (ds *ProductionAtlasDeployments) DeleteDeployment(ctx context.Context, depl return nil } -func (ds *ProductionAtlasDeployments) UpgradeToDedicated(ctx context.Context, deployment Deployment) (Deployment, error) { - switch d := deployment.(type) { +func (ds *ProductionAtlasDeployments) UpgradeToDedicated(ctx context.Context, currentDeployment, targetDeployment Deployment) (Deployment, error) { + switch currentDeployment.(type) { case *Cluster: return nil, errors.New("upgrade from shared to dedicated is not supported") case *Serverless: return nil, errors.New("upgrade from serverless to dedicated is not supported") case *Flex: - flex, _, err := ds.flexAPI.UpgradeFlexCluster(ctx, deployment.GetProjectID(), flexUpgradeToAtlas(d)).Execute() + d := targetDeployment.(*Cluster) + flex, _, err := ds.flexAPI.UpgradeFlexCluster(ctx, targetDeployment.GetProjectID(), flexUpgradeToAtlas(d)).Execute() if err != nil { return nil, err } diff --git a/internal/translation/deployment/deployment_test.go b/internal/translation/deployment/deployment_test.go index 6756aadee5..6c70b381ae 100644 --- a/internal/translation/deployment/deployment_test.go +++ b/internal/translation/deployment/deployment_test.go @@ -358,9 +358,14 @@ 
func TestGetDeployment(t *testing.T) { clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). Return(nil, nil, errors.New("failed to get cluster from atlas")) - serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "project-id", mock.Anything). + Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, atlasAPIError(atlas.ClusterInstanceFromServerlessAPI)) + flexAPI := mockadmin.NewFlexClustersApi(t) - return clusterAPI, serverlessInstanceAPI, flexAPI + + return clusterAPI, serverlessAPI, flexAPI }, err: errors.New("failed to get cluster from atlas"), }, @@ -376,6 +381,7 @@ func TestGetDeployment(t *testing.T) { Return(nil, nil, errors.New("failed to get serverless instance from atlas")) flexAPI := mockadmin.NewFlexClustersApi(t) + return clusterAPI, serverlessInstanceAPI, flexAPI }, err: errors.New("failed to get serverless instance from atlas"), @@ -389,15 +395,32 @@ func TestGetDeployment(t *testing.T) { clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). Return(nil, nil, atlasAPIError(atlas.ClusterNotFound)) - serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "project-id", mock.Anything). + Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, atlasAPIError(atlas.ClusterInstanceFromServerlessAPI)) + flexAPI := mockadmin.NewFlexClustersApi(t) - return clusterAPI, serverlessInstanceAPI, flexAPI + flexAPI.EXPECT().GetFlexCluster(mock.Anything, "project-id", mock.Anything). 
+ Return(admin.GetFlexClusterApiRequest{ApiService: flexAPI}) + flexAPI.EXPECT().GetFlexClusterExecute(mock.Anything).Return(nil, nil, atlasAPIError(atlas.NonFlexInFlexAPI)) + + return clusterAPI, serverlessAPI, flexAPI }, }, "should return nil when serverless instance doesn't exist": { deployment: serverlessInstance(), apiMocker: func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) { clusterAPI := mockadmin.NewClustersApi(t) + clusterAPI.EXPECT().GetCluster(context.Background(), "project-id", "instance0"). + Return(admin.GetClusterApiRequest{ApiService: clusterAPI}) + clusterAPI.EXPECT().GetClusterExecute(mock.AnythingOfType("admin.GetClusterApiRequest")). + Return(nil, nil, atlasAPIError(atlas.ServerlessInstanceFromClusterAPI)) + + flexAPI := mockadmin.NewFlexClustersApi(t) + flexAPI.EXPECT().GetFlexCluster(mock.Anything, "project-id", mock.Anything). + Return(admin.GetFlexClusterApiRequest{ApiService: flexAPI}) + flexAPI.EXPECT().GetFlexClusterExecute(mock.Anything).Return(nil, nil, atlasAPIError(atlas.NonFlexInFlexAPI)) serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) serverlessInstanceAPI.EXPECT().GetServerlessInstance(context.Background(), "project-id", "instance0"). @@ -405,7 +428,6 @@ func TestGetDeployment(t *testing.T) { serverlessInstanceAPI.EXPECT().GetServerlessInstanceExecute(mock.AnythingOfType("admin.GetServerlessInstanceApiRequest")). Return(nil, nil, atlasAPIError(atlas.ServerlessInstanceNotFound)) - flexAPI := mockadmin.NewFlexClustersApi(t) return clusterAPI, serverlessInstanceAPI, flexAPI }, }, @@ -422,9 +444,14 @@ func TestGetDeployment(t *testing.T) { nil, ) - serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI := mockadmin.NewServerlessInstancesApi(t) + serverlessAPI.EXPECT().GetServerlessInstance(mock.Anything, "project-id", mock.Anything). 
+ Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessAPI}) + serverlessAPI.EXPECT().GetServerlessInstanceExecute(mock.Anything).Return(nil, nil, atlasAPIError(atlas.ClusterInstanceFromServerlessAPI)) + flexAPI := mockadmin.NewFlexClustersApi(t) - return clusterAPI, serverlessInstanceAPI, flexAPI + + return clusterAPI, serverlessAPI, flexAPI }, result: expectedGeoShardedCluster(), }, @@ -433,6 +460,8 @@ func TestGetDeployment(t *testing.T) { apiMocker: func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) { clusterAPI := mockadmin.NewClustersApi(t) + flexAPI := mockadmin.NewFlexClustersApi(t) + serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) serverlessInstanceAPI.EXPECT().GetServerlessInstance(context.Background(), "project-id", "instance0"). Return(admin.GetServerlessInstanceApiRequest{ApiService: serverlessInstanceAPI}) @@ -443,7 +472,6 @@ func TestGetDeployment(t *testing.T) { nil, ) - flexAPI := mockadmin.NewFlexClustersApi(t) return clusterAPI, serverlessInstanceAPI, flexAPI }, result: expectedServerlessInstance(), @@ -972,13 +1000,14 @@ func TestUpdateProcessArgs(t *testing.T) { func TestUpgradeCluster(t *testing.T) { tests := map[string]struct { - deployment Deployment - apiMocker func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) - result Deployment - err error + currentDeployment Deployment + targetDeployment Deployment + apiMocker func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) + result Deployment + err error }{ "should fail to upgrade shared cluster in atlas": { - deployment: &Cluster{}, + currentDeployment: &Cluster{}, apiMocker: func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) { clusterAPI := mockadmin.NewClustersApi(t) serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) @@ -988,7 +1017,7 @@ func TestUpgradeCluster(t *testing.T) { err: errors.New("upgrade from shared to dedicated is not 
supported"), }, "should fail to upgrade serverless instance in atlas": { - deployment: &Serverless{}, + currentDeployment: &Serverless{}, apiMocker: func() (admin.ClustersApi, admin.ServerlessInstancesApi, admin.FlexClustersApi) { clusterAPI := mockadmin.NewClustersApi(t) serverlessInstanceAPI := mockadmin.NewServerlessInstancesApi(t) @@ -998,11 +1027,14 @@ func TestUpgradeCluster(t *testing.T) { err: errors.New("upgrade from serverless to dedicated is not supported"), }, "should fail to upgrade flex instance in atlas": { - deployment: &Flex{ + currentDeployment: &Flex{}, + targetDeployment: &Cluster{ ProjectID: "project-id", customResource: &akov2.AtlasDeployment{ Spec: akov2.AtlasDeploymentSpec{ - DeploymentSpec: &akov2.AdvancedDeploymentSpec{}, + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + }, }, }, }, @@ -1019,11 +1051,14 @@ func TestUpgradeCluster(t *testing.T) { err: errors.New("failed to upgrade flex cluster in atlas"), }, "should upgrade flex instance in atlas": { - deployment: &Flex{ + currentDeployment: &Flex{}, + targetDeployment: &Cluster{ ProjectID: "project-id", customResource: &akov2.AtlasDeployment{ Spec: akov2.AtlasDeploymentSpec{ - DeploymentSpec: &akov2.AdvancedDeploymentSpec{}, + DeploymentSpec: &akov2.AdvancedDeploymentSpec{ + Name: "cluster0", + }, }, }, }, @@ -1057,7 +1092,7 @@ func TestUpgradeCluster(t *testing.T) { clusterAPI, serverlessInstanceAPI, flexAPI := tt.apiMocker() service := NewAtlasDeployments(clusterAPI, serverlessInstanceAPI, nil, flexAPI, false) - result, err := service.UpgradeToDedicated(context.Background(), tt.deployment) + result, err := service.UpgradeToDedicated(context.Background(), tt.currentDeployment, tt.targetDeployment) require.Equal(t, tt.err, err) assert.Equal(t, tt.result, result) }) diff --git a/scripts/launch-ci-e2e.sh b/scripts/launch-ci-e2e.sh index e728171a82..834f2f1130 100755 --- a/scripts/launch-ci-e2e.sh +++ b/scripts/launch-ci-e2e.sh @@ -18,6 +18,9 @@ set -euo pipefail helm 
version go version + +make manager + cd test/e2e # no `long-run`, no `broken` tests. `Long-run` tests run as a separate job @@ -27,6 +30,6 @@ else filter="$TEST_NAME && !long-run && !broken"; fi -AKO_E2E_TEST=1 ginkgo --output-interceptor-mode=none --label-filter="${filter}" --timeout 120m --nodes=10 \ +NO_GORUN=1 AKO_E2E_TEST=1 ginkgo --output-interceptor-mode=none --label-filter="${filter}" --timeout 120m --nodes=10 \ --race --cover --v --coverpkg=github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/... \ --coverprofile=coverprofile.out diff --git a/test/e2e/flex_to_dedicated_test.go b/test/e2e/flex_to_dedicated_test.go new file mode 100644 index 0000000000..63f94fe0a4 --- /dev/null +++ b/test/e2e/flex_to_dedicated_test.go @@ -0,0 +1,287 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e_test + +import ( + "context" + "os/exec" + "syscall" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + akov2common "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/k8s" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/utils" +) + +var _ = Describe("Flex to Dedicated Upgrade", Ordered, Label("flex-to-dedicated"), func() { + var kubeClient client.Client + var testNamespace *corev1.Namespace + var resourcePrefix string + var akoCmd *exec.Cmd + + _ = BeforeAll(func(ctx context.Context) { + cmd, err := k8s.RunManagerBinary(false) + Expect(err).To(BeNil()) + akoCmd = cmd + + client, err := k8s.CreateNewClient() + Expect(err).To(BeNil()) + kubeClient = client + }) + + _ = AfterAll(func(ctx context.Context) { + if akoCmd != nil { + if akoCmd.Process != nil { + Expect(syscall.Kill(akoCmd.Process.Pid, syscall.SIGTERM)).To(Succeed()) + } + + Expect(akoCmd.Wait()).To(Succeed()) + } + }) + + _ = BeforeEach(func(ctx context.Context) { + Expect(akoCmd.ProcessState).To(BeNil()) + + resourcePrefix = utils.RandomName("flex-to-dedicated") + + testNamespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-ns", + }} + Expect(kubeClient.Create(ctx, testNamespace)).To(Succeed()) + }) + + _ = AfterEach(func(ctx context.Context) { + if kubeClient == nil { + return + } + Expect(kubeClient.Delete(ctx, testNamespace)).To(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(kubeClient.Get(ctx, client.ObjectKeyFromObject(testNamespace), testNamespace)).To(Succeed()) + }).WithTimeout(time.Minute).WithPolling(time.Second).To(Succeed()) + }) + + It("Should upgrade a Flex cluster to a Dedicated cluster", func(ctx context.Context) { + 
By("Create a secret with Atlas credentials", func() { + k8s.CreateDefaultSecret(ctx, kubeClient, resourcePrefix+"-secret", testNamespace.Name) + }) + + By("Create Atlas Project", func() { + project := akov2.AtlasProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-project", + Namespace: testNamespace.Name, + }, + Spec: akov2.AtlasProjectSpec{ + Name: resourcePrefix + "-project", + ConnectionSecret: &akov2common.ResourceRefNamespaced{ + Name: resourcePrefix + "-secret", + }, + }, + } + + Expect(kubeClient.Create(ctx, &project)).To(Succeed()) + + Eventually(func(g Gomega) { + condition, err := k8s.GetProjectStatusCondition(ctx, kubeClient, api.ReadyType, testNamespace.Name, resourcePrefix+"-project") + g.Expect(err).To(BeNil()) + g.Expect(condition).To(Equal("True")) + }).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Create a Flex cluster", func() { + flexCluster := akov2.AtlasDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcePrefix + "-cluster", + Namespace: testNamespace.Name, + }, + Spec: akov2.AtlasDeploymentSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &akov2common.ResourceRefNamespaced{ + Name: resourcePrefix + "-project", + }, + }, + FlexSpec: &akov2.FlexSpec{ + Name: resourcePrefix + "-cluster", + ProviderSettings: &akov2.FlexProviderSettings{ + BackingProviderName: "AWS", + RegionName: "US_EAST_2", + }, + }, + }, + } + + Expect(kubeClient.Create(ctx, &flexCluster)).To(Succeed()) + Eventually(func(g Gomega) { + condition, err := k8s.GetDeploymentStatusCondition(ctx, kubeClient, api.ReadyType, testNamespace.Name, resourcePrefix+"-cluster") + g.Expect(err).To(BeNil()) + g.Expect(condition).To(Equal("True")) + }).WithTimeout(10 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Upgrade Flex cluster to Dedicated cluster", func() { + var deployment akov2.AtlasDeployment + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, 
Name: resourcePrefix + "-cluster"}, &deployment)).To(Succeed()) + + deployment.Spec.UpgradeToDedicated = true + deployment.Spec.FlexSpec = nil + deployment.Spec.DeploymentSpec = &akov2.AdvancedDeploymentSpec{ + Name: resourcePrefix + "-cluster", + ClusterType: "REPLICASET", + ReplicationSpecs: []*akov2.AdvancedReplicationSpec{ + { + RegionConfigs: []*akov2.AdvancedRegionConfig{ + { + ProviderName: "AWS", + RegionName: "EU_CENTRAL_1", + Priority: pointer.MakePtr(7), + ElectableSpecs: &akov2.Specs{ + InstanceSize: "M30", + NodeCount: pointer.MakePtr(3), + }, + }, + }, + }, + }, + } + + Expect(kubeClient.Update(ctx, &deployment)).To(Succeed()) + Eventually(func(g Gomega) { + updatedDeployment := akov2.AtlasDeployment{} + g.Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-cluster"}, &updatedDeployment)).To(Succeed()) + for _, c := range updatedDeployment.GetStatus().GetConditions() { + switch c.Type { + case "Ready": + g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) + g.Expect(c.Message).To(Equal("Cluster is already dedicated. 
It’s recommended to remove or set the upgrade flag to false")) + default: + g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) + } + } + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("Delete resources", func() { + var deployment akov2.AtlasDeployment + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-cluster"}, &deployment)).To(Succeed()) + Expect(kubeClient.Delete(ctx, &deployment)).To(Succeed()) + + var project akov2.AtlasProject + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-project"}, &project)).To(Succeed()) + Expect(kubeClient.Delete(ctx, &project)).To(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-project"}, &project)).ToNot(Succeed()) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) + }) + + DescribeTable( + "Should handle invalid upgrade scenarios", + func(ctx context.Context, project *akov2.AtlasProject, deployment *akov2.AtlasDeployment, updateDeployment *akov2.AtlasDeployment, errorMessage string) { + By("Create a secret with Atlas credentials", func() { + k8s.CreateDefaultSecret(ctx, kubeClient, resourcePrefix+"-secret", testNamespace.Name) + }) + + By("Create Atlas Project", func() { + project.WithName(resourcePrefix + "-project") + project.WithAtlasName(resourcePrefix + "-project") + project.WithConnectionSecret(resourcePrefix + "-secret") + project.Namespace = testNamespace.Name + + Expect(kubeClient.Create(ctx, project)).To(Succeed()) + + Eventually(func(g Gomega) { + condition, err := k8s.GetProjectStatusCondition(ctx, kubeClient, api.ReadyType, testNamespace.Name, resourcePrefix+"-project") + g.Expect(err).To(BeNil()) + g.Expect(condition).To(Equal("True")) + }).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Create a 
cluster", func() { + deployment.WithName(resourcePrefix + "-cluster") + deployment.WithAtlasName(resourcePrefix + "-cluster") + deployment.WithProjectName(resourcePrefix + "-project") + deployment.Namespace = testNamespace.Name + + Expect(kubeClient.Create(ctx, deployment)).To(Succeed()) + Eventually(func(g Gomega) { + condition, err := k8s.GetDeploymentStatusCondition(ctx, kubeClient, api.ReadyType, testNamespace.Name, resourcePrefix+"-cluster") + g.Expect(err).To(BeNil()) + g.Expect(condition).To(Equal("True")) + }).WithTimeout(10 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Upgrade cluster to Dedicated cluster", func() { + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-cluster"}, deployment)).To(Succeed()) + + updateDeployment.ObjectMeta = deployment.ObjectMeta + updateDeployment.WithAtlasName(resourcePrefix + "-cluster") + updateDeployment.WithProjectName(resourcePrefix + "-project") + updateDeployment.Namespace = testNamespace.Name + updateDeployment.Spec.UpgradeToDedicated = true + updateDeployment.Spec.DeploymentSpec.ClusterType = "SHARDED" + + Expect(kubeClient.Update(ctx, updateDeployment)).To(Succeed()) + Eventually(func(g Gomega) { + updatedDeployment := akov2.AtlasDeployment{} + g.Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-cluster"}, &updatedDeployment)).To(Succeed()) + for _, c := range updatedDeployment.GetStatus().GetConditions() { + if c.Type == "DeploymentReady" { + g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(c.Reason).To(Equal("DedicatedMigrationFailed")) + g.Expect(c.Message).To(ContainSubstring(errorMessage)) + } + } + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("Delete resources", func() { + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-cluster"}, deployment)).To(Succeed()) + 
Expect(kubeClient.Delete(ctx, deployment)).To(Succeed()) + + Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-project"}, project)).To(Succeed()) + Expect(kubeClient.Delete(ctx, project)).To(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(kubeClient.Get(ctx, client.ObjectKey{Namespace: testNamespace.Name, Name: resourcePrefix + "-project"}, project)).ToNot(Succeed()) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) + }, + Entry( + "Cannot upgrade a shared cluster to dedicated", + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasproject.yaml", &akov2.AtlasProject{}), + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasdeployment_basic_free.yaml", &akov2.AtlasDeployment{}), + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasdeployment_standard.yaml", &akov2.AtlasDeployment{}), + "failed to upgrade cluster: upgrade from shared to dedicated is not supported", + ), + Entry( + "Cannot upgrade a flex cluster to dedicated with wrong spec", + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasproject.yaml", &akov2.AtlasProject{}), + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasdeployment_flex.yaml", &akov2.AtlasDeployment{}), + utils.ParseObjectFromYAMLFile("../helper/e2e/data/atlasdeployment_standard.yaml", &akov2.AtlasDeployment{}), + "Cannot upgrade a shared-tier cluster to a sharded cluster. 
Please upgrade to a dedicated replica set before converting to a sharded cluster", + ), + ) +}) diff --git a/test/helper/e2e/data/atlasdeployment_serverless.yaml b/test/helper/e2e/data/atlasdeployment_serverless.yaml new file mode 100644 index 0000000000..d257e6d2e6 --- /dev/null +++ b/test/helper/e2e/data/atlasdeployment_serverless.yaml @@ -0,0 +1,13 @@ +apiVersion: atlas.mongodb.com/v1 +kind: AtlasDeployment +metadata: + name: atlas-deployment-serverless +spec: + projectRef: + name: my-project + serverlessSpec: + name: serverless-instance + providerSettings: + providerName: "SERVERLESS" + backingProviderName: AWS + regionName: US_EAST_1 diff --git a/test/helper/e2e/data/atlasdeployment_standard.yaml b/test/helper/e2e/data/atlasdeployment_standard.yaml index 54112d9c87..9e5ca9758d 100644 --- a/test/helper/e2e/data/atlasdeployment_standard.yaml +++ b/test/helper/e2e/data/atlasdeployment_standard.yaml @@ -17,4 +17,4 @@ spec: priority: 7 electableSpecs: instanceSize: M10 - nodeCount: 1 + nodeCount: 3 diff --git a/test/helper/e2e/k8s/binary.go b/test/helper/e2e/k8s/binary.go new file mode 100644 index 0000000000..8191282788 --- /dev/null +++ b/test/helper/e2e/k8s/binary.go @@ -0,0 +1,134 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package k8s + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "syscall" + + "github.com/onsi/ginkgo/v2/dsl/core" +) + +const ( + DefaultDelveListenPort = ":2345" +) + +func NoGoRunEnvSet() bool { + envSet, _ := strconv.ParseBool(os.Getenv("NO_GORUN")) + return envSet +} + +func RunDelveEnvSet() bool { + envSet, _ := strconv.ParseBool(os.Getenv("RUN_DELVE")) + return envSet +} + +func RunManagerBinary(deletionProtection bool) (*exec.Cmd, error) { + args := []string{ + "--log-level=-9", + fmt.Sprintf("--object-deletion-protection=%v", deletionProtection), + "--log-encoder=console", + `--atlas-domain=https://cloud-qa.mongodb.com`, + } + + cmdLine := append(operatorCommand(), args...) + //nolint:gosec + cmd := exec.Command(cmdLine[0], cmdLine[1:]...) + + // works around https://github.com/golang/go/issues/40467 + // to be able to propagate SIGTERM to the child process. + // See https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773 + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + + cmd.Stdout = core.GinkgoWriter + cmd.Stderr = core.GinkgoWriter + cmd.Env = append( + os.Environ(), + `OPERATOR_NAMESPACE=mongodb-atlas-system`, + `OPERATOR_POD_NAME=mongodb-atlas-operator`, + ) + + if err := cmd.Start(); err != nil { + return nil, err + } + + return cmd, nil +} + +func operatorCommand() []string { + operatorBinary := envVarOrDefault("AKO_BINARY", filepath.Join("bin", "manager")) + if RunDelveEnvSet() { + return []string{ + "dlv", "exec", + "--api-version=2", + "--headless=true", + fmt.Sprintf("--listen=%s", envVarOrDefault("DELVE_LISTEN", DefaultDelveListenPort)), + filepath.Join(repositoryDir(), operatorBinary), + "--", + } + } + + if NoGoRunEnvSet() { + return []string{filepath.Join(repositoryDir(), operatorBinary)} + } + + if os.Getenv("EXPERIMENTAL") == "true" { + return []string{ + "go", + "run", + "-ldflags=-X 
github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version.Experimental=true", + filepath.Join(repositoryDir(), "cmd"), + } + } + + return []string{"go", "run", filepath.Join(repositoryDir(), "cmd")} +} + +func envVarOrDefault(name, defaultValue string) string { + value, ok := os.LookupEnv(name) + if ok { + return value + } + return defaultValue +} + +func repositoryDir() string { + // Caller(0) returns the path to the calling test file rather than the path to this framework file. That + // precludes assuming how many directories are between the file and the repo root. It's therefore necessary + // to search in the hierarchy for an indication of a path that looks like the repo root. + //nolint:dogsled + _, sourceFile, _, _ := runtime.Caller(0) + currentDir := filepath.Dir(sourceFile) + for { + // go.mod should always exist in the repo root + if _, err := os.Stat(filepath.Join(currentDir, "go.mod")); err == nil { + break + } else if errors.Is(err, os.ErrNotExist) { + currentDir, err = filepath.Abs(filepath.Join(currentDir, "..")) + if err != nil { + panic(err) + } + } else { + panic(err) + } + } + return currentDir +} diff --git a/test/helper/e2e/utils/utils.go b/test/helper/e2e/utils/utils.go index 0138226921..2854a8b8c4 100644 --- a/test/helper/e2e/utils/utils.go +++ b/test/helper/e2e/utils/utils.go @@ -104,6 +104,12 @@ func ReadInYAMLFileAndConvert(pathToYamlFile string, cnfg interface{}) interface return cnfg } +func ParseObjectFromYAMLFile[T akov2.AtlasCustomResource](pathToYamlFile string, obj T) T { + ReadInYAMLFileAndConvert(pathToYamlFile, obj) + + return obj +} + // ConvertYAMLtoJSONHelper converts the yaml to json recursively func ConvertYAMLtoJSONHelper(i interface{}) interface{} { switch item := i.(type) {