diff --git a/CHANGELOG.md b/CHANGELOG.md
index f4954308..b3f30e4e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Added
 - [#121] Added use case to check if dogus actually use the desired version and config before completing the blueprint
+- [#129] Reconciliation of the blueprint on changes to dogu CRs, CES config maps, and CES secrets
 ### Changed
 - [#119] *breaking* sensitive dogu config can now only be referenced with secrets
diff --git a/k8s/helm/templates/deployment.yaml b/k8s/helm/templates/deployment.yaml
index fb1bcf79..2c9871cf 100644
--- a/k8s/helm/templates/deployment.yaml
+++ b/k8s/helm/templates/deployment.yaml
@@ -68,6 +68,8 @@ spec:
                 name: ces-proxy
                 key: url
                 optional: true
+        - name: DEBOUNCE_WINDOW
+          value: {{ .Values.manager.reconciler.debounceWindow | default "10s" | quote }}
         image: "{{ .Values.manager.image.registry }}/{{ .Values.manager.image.repository }}:{{ .Values.manager.image.tag | default .Chart.AppVersion }}"
         livenessProbe:
           httpGet:
diff --git a/k8s/helm/templates/manager-role.yaml b/k8s/helm/templates/manager-role.yaml
index 8967a42d..452733ec 100644
--- a/k8s/helm/templates/manager-role.yaml
+++ b/k8s/helm/templates/manager-role.yaml
@@ -15,10 +15,26 @@ rules:
   resources:
   - secrets
   verbs:
-  - create
-  - update
   - get
+  - list # needed, as the registry-lib seems to require this even for a plain get
+  - watch # needed to watch sensitive dogu config (secrets) so changes trigger a blueprint reconciliation
+- apiGroups:
+  - ""
+  resources:
+  - configmaps # for normal dogu config
+  verbs:
+  - get
+  - list # needed, as the registry-lib seems to require this even for a plain get
+  - watch
+- apiGroups:
+  - k8s.cloudogu.com
+  resources:
+  - dogus
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - ""
   resources:
diff --git a/k8s/helm/values.yaml b/k8s/helm/values.yaml
index 26aa480c..aa85a687 100644
--- a/k8s/helm/values.yaml
+++ b/k8s/helm/values.yaml
@@ -18,6 +18,8 @@ manager:
       memory: 105M
   networkPolicies:
     enabled: true
+  reconciler:
+    debounceWindow: 10s
 doguRegistry:
   certificate:
     secret: dogu-registry-cert
diff --git a/main.go b/main.go
index 15acf781..66674a15 100644
--- a/main.go
+++ b/main.go
@@ -11,6 +11,7 @@ import (
 	"github.com/cloudogu/k8s-blueprint-operator/v2/pkg"
 	"github.com/cloudogu/k8s-blueprint-operator/v2/pkg/adapter/reconciler"
 	"github.com/cloudogu/k8s-blueprint-operator/v2/pkg/config"
+	v2 "github.com/cloudogu/k8s-dogu-lib/v2/api/v2"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -46,6 +47,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(bpv2.AddToScheme(scheme))
+	utilruntime.Must(v2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
diff --git a/main_internal_test.go b/main_internal_test.go
index 6bf7baee..6f7605df 100644
--- a/main_internal_test.go
+++ b/main_internal_test.go
@@ -6,7 +6,6 @@ import (
 	"testing"
 
 	"github.com/go-logr/logr"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
@@ -181,6 +180,7 @@ func Test_startOperator(t *testing.T) {
 	t.Setenv("DOGU_REGISTRY_ENDPOINT", "dogu.example.com")
 	t.Setenv("DOGU_REGISTRY_USERNAME", "user")
 	t.Setenv("DOGU_REGISTRY_PASSWORD", "password")
+	t.Setenv("DEBOUNCE_WINDOW", "10s")
 
 	oldNewManagerFunc := ctrl.NewManager
 	oldGetConfigFunc := ctrl.GetConfigOrDie
@@ -196,6 +196,7 @@ func Test_startOperator(t
*testing.T) { //ctrlManMock.EXPECT().GetConfig().Return(restConfig) ctrlManMock.EXPECT().GetControllerOptions().Return(config.Controller{}) ctrlManMock.EXPECT().GetScheme().Return(runtime.NewScheme()) + ctrlManMock.EXPECT().GetCache().Return(nil) ctrl.NewManager = func(config *rest.Config, options manager.Options) (manager.Manager, error) { return ctrlManMock, nil @@ -220,6 +221,7 @@ func Test_startOperator(t *testing.T) { t.Setenv("DOGU_REGISTRY_ENDPOINT", "dogu.example.com") t.Setenv("DOGU_REGISTRY_USERNAME", "user") t.Setenv("DOGU_REGISTRY_PASSWORD", "password") + t.Setenv("DEBOUNCE_WINDOW", "10s") oldNewManagerFunc := ctrl.NewManager oldGetConfigFunc := ctrl.GetConfigOrDie @@ -268,6 +270,7 @@ func Test_startOperator(t *testing.T) { t.Setenv("DOGU_REGISTRY_ENDPOINT", "dogu.example.com") t.Setenv("DOGU_REGISTRY_USERNAME", "user") t.Setenv("DOGU_REGISTRY_PASSWORD", "password") + t.Setenv("DEBOUNCE_WINDOW", "10s") oldNewManagerFunc := ctrl.NewManager oldGetConfigFunc := ctrl.GetConfigOrDie @@ -317,6 +320,7 @@ func Test_startOperator(t *testing.T) { t.Setenv("DOGU_REGISTRY_ENDPOINT", "dogu.example.com") t.Setenv("DOGU_REGISTRY_USERNAME", "user") t.Setenv("DOGU_REGISTRY_PASSWORD", "password") + t.Setenv("DEBOUNCE_WINDOW", "10s") oldNewManagerFunc := ctrl.NewManager oldGetConfigFunc := ctrl.GetConfigOrDie @@ -372,6 +376,7 @@ func Test_startOperator(t *testing.T) { t.Setenv("DOGU_REGISTRY_ENDPOINT", "dogu.example.com") t.Setenv("DOGU_REGISTRY_USERNAME", "user") t.Setenv("DOGU_REGISTRY_PASSWORD", "password") + t.Setenv("DEBOUNCE_WINDOW", "10s") oldNewManagerFunc := ctrl.NewManager oldGetConfigFunc := ctrl.GetConfigOrDie diff --git a/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository.go b/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository.go index deafe1d7..a4c00d18 100644 --- a/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository.go +++ b/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository.go @@ -108,6 +108,22 @@ func (repo *blueprintSpecRepo) Count(ctx context.Context, limit int) (int, error return len(list.Items), nil } +func (repo *blueprintSpecRepo) ListIds(ctx context.Context) ([]string, error) { + list, err := repo.blueprintClient.List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, &domainservice.InternalError{ + WrappedError: err, + Message: "error while listing blueprint resources", + } + } + + result := make([]string, len(list.Items)) + for index, blueprint := range list.Items { + result[index] = blueprint.Name + } + return result, nil +} + // Update persists changes in the blueprint to the corresponding blueprint CR. 
func (repo *blueprintSpecRepo) Update(ctx context.Context, spec *domain.BlueprintSpec) error { logger := log.FromContext(ctx).WithName("blueprintSpecRepo.Update") diff --git a/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository_test.go b/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository_test.go index 7d14d958..b0756799 100644 --- a/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository_test.go +++ b/pkg/adapter/kubernetes/blueprintcr/v2/blueprintSpecCRRepository_test.go @@ -481,3 +481,52 @@ func Test_blueprintSpecRepo_Count(t *testing.T) { assert.ErrorContains(t, err, "error while listing blueprint resources") }) } + +func Test_blueprintSpecRepo_ListIds(t *testing.T) { + t.Run("success", func(t *testing.T) { + restClientMock := newMockBlueprintInterface(t) + repo := NewBlueprintSpecRepository(restClientMock, nil) + + returnList := bpv2.BlueprintList{Items: []bpv2.Blueprint{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + }, + }, + }} + + restClientMock.EXPECT().List(ctx, metav1.ListOptions{}).Return(&returnList, nil) + + // when + idList, err := repo.ListIds(ctx) + + // then + require.NoError(t, err) + assert.Len(t, idList, 2) + assert.Contains(t, idList, "test1") + assert.Contains(t, idList, "test2") + }) + + t.Run("throw internal error on list error", func(t *testing.T) { + restClientMock := newMockBlueprintInterface(t) + repo := NewBlueprintSpecRepository(restClientMock, nil) + + restClientMock.EXPECT().List(ctx, metav1.ListOptions{}).Return(nil, assert.AnError) + + // when + list, err := repo.ListIds(ctx) + + // then + require.Error(t, err) + assert.Nil(t, list) + var targetErr *domainservice.InternalError + assert.ErrorAs(t, err, &targetErr) + assert.ErrorContains(t, err, "error while listing blueprint resources") + }) + +} diff --git a/pkg/adapter/reconciler/blueprint_controller.go b/pkg/adapter/reconciler/blueprint_controller.go index b50a80a8..32f9e456 100644 --- a/pkg/adapter/reconciler/blueprint_controller.go +++ b/pkg/adapter/reconciler/blueprint_controller.go @@ -3,31 +3,47 @@ package reconciler import ( "context" "errors" - "fmt" "time" + v2 "github.com/cloudogu/k8s-dogu-lib/v2/api/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/go-logr/logr" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" bpv2 "github.com/cloudogu/k8s-blueprint-lib/v2/api/v2" - "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain" "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" ) // BlueprintReconciler reconciles a Blueprint object type BlueprintReconciler struct { blueprintChangeHandler BlueprintChangeHandler + blueprintRepo BlueprintSpecRepository + namespace string + debounce SingletonDebounce + window time.Duration + errorHandler *ErrorHandler } func NewBlueprintReconciler( blueprintChangeHandler BlueprintChangeHandler, + repo domainservice.BlueprintSpecRepository, + namespace string, + window time.Duration, ) *BlueprintReconciler { - return &BlueprintReconciler{blueprintChangeHandler: blueprintChangeHandler} + return &BlueprintReconciler{ + blueprintChangeHandler: blueprintChangeHandler, + 
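+		// The zero-value SingletonDebounce starts with no cooldown, so the first
+		// watched change always passes through; `window` only throttles the
+		// events that follow it.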
blueprintRepo: repo, + namespace: namespace, + debounce: SingletonDebounce{}, + window: window, + errorHandler: NewErrorHandler(), + } } // +kubebuilder:rbac:groups=k8s.cloudogu.com,resources=blueprints,verbs=get;watch;update;patch @@ -46,68 +62,20 @@ func (r *BlueprintReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( err := r.blueprintChangeHandler.CheckForMultipleBlueprintResources(ctx) if err != nil { - return decideRequeueForError(logger, err) + return r.errorHandler.handleError(logger, err) } err = r.blueprintChangeHandler.HandleUntilApplied(ctx, req.Name) if err != nil { - return decideRequeueForError(logger, err) + return r.errorHandler.handleError(logger, err) } - return ctrl.Result{}, nil -} - -func decideRequeueForError(logger logr.Logger, err error) (ctrl.Result, error) { - errLogger := logger.WithValues("error", err) - - var internalError *domainservice.InternalError - var conflictError *domainservice.ConflictError - var notFoundError *domainservice.NotFoundError - var invalidBlueprintError *domain.InvalidBlueprintError - var healthError *domain.UnhealthyEcosystemError - var stateDiffNotEmptyError *domain.StateDiffNotEmptyError - var multipleBlueprintsError *domain.MultipleBlueprintsError - var dogusNotUpToDateError *domain.DogusNotUpToDateError - switch { - case errors.As(err, &internalError): - errLogger.Error(err, "An internal error occurred and can maybe be fixed by retrying it later") - return ctrl.Result{}, err // automatic requeue because of non-nil err - case errors.As(err, &conflictError): - errLogger.Info("A concurrent update happened in conflict to the processing of the blueprint spec. A retry could fix this issue") - return ctrl.Result{RequeueAfter: 1 * time.Second}, nil // no error as this would lead to the ignorance of our own retry params - case errors.As(err, ¬FoundError): - if notFoundError.DoNotRetry { - // do not retry in this case, because if f.e. the blueprint is not found, nothing will bring it back, except the - // user, and this would trigger the reconciler by itself. - logger.Error(err, "Did not find resource and a retry is not expected to fix this issue. There will be no further automatic evaluation.") - return ctrl.Result{}, nil - } - logger.Error(err, "Resource was not found, so maybe it was deleted in the meantime. Retry later") - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - case errors.As(err, &invalidBlueprintError): - errLogger.Info("Blueprint is invalid, therefore there will be no further evaluation.") - return ctrl.Result{}, nil - case errors.As(err, &healthError): - // really normal case - errLogger.Info("Ecosystem is unhealthy. Retry later") - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - case errors.As(err, &stateDiffNotEmptyError): - errLogger.Info("requeue until state diff is empty") - // fast requeue here since state diff has to be determined again - return ctrl.Result{RequeueAfter: 1 * time.Second}, nil - case errors.As(err, &multipleBlueprintsError): - errLogger.Error(err, "Ecosystem contains multiple blueprints - delete all except one. Retry later") - // fast requeue here since state diff has to be determined again - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - case errors.As(err, &dogusNotUpToDateError): - // really normal case - errLogger.Info(fmt.Sprintf("Dogus are not up to date yet. Retry later: %s", err.Error())) - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - default: - errLogger.Error(err, "An unknown error type occurred. 
Retry with default backoff") - return ctrl.Result{}, err // automatic requeue because of non-nil err + // Schedule a reconciliation after the cooldown period if there is one pending. + if requeue, after := r.debounce.ShouldRequeue(); requeue { + return ctrl.Result{RequeueAfter: after}, nil } + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. @@ -122,9 +90,72 @@ func (r *BlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error { RecoverPanic: controllerOptions.RecoverPanic, NeedLeaderElection: controllerOptions.NeedLeaderElection, } + return ctrl.NewControllerManagedBy(mgr). WithEventFilter(predicate.GenerationChangedPredicate{}). WithOptions(options). For(&bpv2.Blueprint{}). + WatchesRawSource(r.getConfigMapKind(mgr)). + WatchesRawSource(r.getSecretKind(mgr)). + WatchesRawSource(r.getDoguKind(mgr)). Complete(r) } + +func (r *BlueprintReconciler) getConfigMapKind(mgr ctrl.Manager) source.TypedSyncingSource[reconcile.Request] { + return source.TypedKind( + mgr.GetCache(), + &corev1.ConfigMap{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, cm *corev1.ConfigMap) []reconcile.Request { + return r.getBlueprintRequest(ctx) + }), + predicate.And( + makeResourcePredicate[*corev1.ConfigMap](r.hasOperatorNamespace), + makeResourcePredicate[*corev1.ConfigMap](hasCesLabel), + makeResourcePredicate[*corev1.ConfigMap](hasNotDoguDescriptorLabel), + makeContentPredicate(&r.debounce, r.window, configMapContentChanged), + ), + ) +} + +func (r *BlueprintReconciler) getSecretKind(mgr ctrl.Manager) source.TypedSyncingSource[reconcile.Request] { + return source.TypedKind( + mgr.GetCache(), + &corev1.Secret{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, s *corev1.Secret) []reconcile.Request { + return r.getBlueprintRequest(ctx) + }), + predicate.And( + makeResourcePredicate[*corev1.Secret](r.hasOperatorNamespace), + makeContentPredicate(&r.debounce, r.window, secretContentChanged), + ), + ) +} + +func (r *BlueprintReconciler) getDoguKind(mgr ctrl.Manager) source.TypedSyncingSource[reconcile.Request] { + return source.TypedKind( + mgr.GetCache(), + &v2.Dogu{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, d *v2.Dogu) []reconcile.Request { + return r.getBlueprintRequest(ctx) + }), + predicate.And( + makeResourcePredicate[*v2.Dogu](r.hasOperatorNamespace), + makeContentPredicate(&r.debounce, r.window, doguSpecChanged), + ), + ) +} + +func (r *BlueprintReconciler) getBlueprintRequest(ctx context.Context) []reconcile.Request { + idList, err := r.blueprintRepo.ListIds(ctx) + if err != nil || len(idList) != 1 { + return nil + } + + blueprintRequest := []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: idList[0], + Namespace: r.namespace, + }, + }} + return blueprintRequest +} diff --git a/pkg/adapter/reconciler/blueprint_controller_test.go b/pkg/adapter/reconciler/blueprint_controller_test.go index 86fd44c0..c5f388bd 100644 --- a/pkg/adapter/reconciler/blueprint_controller_test.go +++ b/pkg/adapter/reconciler/blueprint_controller_test.go @@ -3,17 +3,14 @@ package reconciler import ( "context" "errors" - "fmt" "testing" "time" - "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain" "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" - "github.com/go-logr/logr" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" @@ -30,8 +27,9 @@ var testCtx = context.Background() const testBlueprint = "test-blueprint" func TestNewBlueprintReconciler(t *testing.T) { - reconciler := NewBlueprintReconciler(nil) + reconciler := NewBlueprintReconciler(nil, nil, "", time.Duration(0)) assert.NotNil(t, reconciler) + assert.NotNil(t, reconciler.errorHandler) } func TestBlueprintReconciler_SetupWithManager(t *testing.T) { @@ -94,7 +92,7 @@ func TestBlueprintReconciler_Reconcile(t *testing.T) { assert.Equal(t, ctrl.Result{}, actual) }) - t.Run("should succeed", func(t *testing.T) { + t.Run("should fail on multiple blueprint resource error", func(t *testing.T) { // given request := ctrl.Request{NamespacedName: types.NamespacedName{Name: testBlueprint}} changeHandlerMock := NewMockBlueprintChangeHandler(t) @@ -124,175 +122,113 @@ func TestBlueprintReconciler_Reconcile(t *testing.T) { require.Error(t, err) assert.ErrorContains(t, err, "test") }) -} -func Test_decideRequeueForError(t *testing.T) { - t.Run("should catch wrapped InternalError, issue a log line and requeue with error", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) + t.Run("should handle error with requeue", func(t *testing.T) { + mockHandler := NewMockBlueprintChangeHandler(t) + mockRepo := NewMockBlueprintSpecRepository(t) - intermediateErr := domainservice.NewInternalError(assert.AnError, "a generic oh-noez") - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + reconciler := NewBlueprintReconciler(mockHandler, mockRepo, "test-namespace", 5*time.Second) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-blueprint", + Namespace: "test-namespace", + }, + } - // then - require.Error(t, err) - assert.Equal(t, ctrl.Result{}, actual) - assert.Contains(t, logSinkMock.output, "0: An internal error occurred and can maybe be fixed by retrying it later") - }) - t.Run("should catch wrapped ConflictError, issue a log line and requeue timely", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) + ctx := context.TODO() + testErr := &domainservice.ConflictError{Message: "conflict error"} - intermediateErr := &domainservice.ConflictError{ - WrappedError: assert.AnError, - Message: "a generic oh-noez", - } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + mockHandler.EXPECT().CheckForMultipleBlueprintResources(ctx).Return(nil) + mockHandler.EXPECT().HandleUntilApplied(ctx, "test-blueprint").Return(testErr) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + result, err := reconciler.Reconcile(ctx, req) - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{RequeueAfter: 1 * time.Second}, actual) - assert.Contains(t, logSinkMock.output, "0: A concurrent update happened in conflict to the processing of the blueprint spec. 
A retry could fix this issue") + assert.NoError(t, err) // Error should be handled by ErrorHandler + assert.Equal(t, ctrl.Result{RequeueAfter: 1 * time.Second}, result) }) - t.Run("should catch wrapped NotFoundError, issue a log line and requeue", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) - intermediateErr := &domainservice.NotFoundError{ - WrappedError: assert.AnError, - Message: "a generic oh-noez", - } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + t.Run("should reconcile on pending change", func(t *testing.T) { + mockHandler := NewMockBlueprintChangeHandler(t) + mockRepo := NewMockBlueprintSpecRepository(t) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + reconciler := NewBlueprintReconciler(mockHandler, mockRepo, "test-namespace", 5*time.Second) - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{RequeueAfter: 10 * time.Second}, actual) - assert.Contains(t, logSinkMock.output, "0: Resource was not found, so maybe it was deleted in the meantime. Retry later") - }) - t.Run("should catch wrapped MultipleBlueprintsError, issue a error log line and requeue", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) + // Set up debounce to have pending request + reconciler.debounce.AllowOrMark(1 * time.Second) + reconciler.debounce.AllowOrMark(1 * time.Second) // This marks as pending + assert.True(t, reconciler.debounce.pending) - intermediateErr := &domain.MultipleBlueprintsError{ - Message: "multiple blueprints found", + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-blueprint", + Namespace: "test-namespace", + }, } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + ctx := context.TODO() - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{RequeueAfter: 10 * time.Second}, actual) - assert.Contains(t, logSinkMock.output, "0: Ecosystem contains multiple blueprints - delete all except one. Retry later") + mockHandler.EXPECT().CheckForMultipleBlueprintResources(ctx).Return(nil) + mockHandler.EXPECT().HandleUntilApplied(ctx, "test-blueprint").Return(nil) + + result, err := reconciler.Reconcile(ctx, req) + + assert.NoError(t, err) + assert.True(t, result.RequeueAfter > 0) }) - t.Run("NotFoundError, should not retry if DoNotRetry-Flag is set", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) +} - intermediateErr := &domainservice.NotFoundError{ - WrappedError: assert.AnError, - Message: "Blueprint does not exist", - DoNotRetry: true, - } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) +func TestBlueprintReconciler_getBlueprintRequest(t *testing.T) { + ctx := context.TODO() - // when - actual, err := decideRequeueForError(testLogger, errorChain) + t.Run("one blueprint gets successful request", func(t *testing.T) { + idList := []string{"test-blueprint"} - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{}, actual) - assert.Contains(t, logSinkMock.output, "0: Did not find resource and a retry is not expected to fix this issue. 
There will be no further automatic evaluation.") - }) - t.Run("should catch wrapped InvalidBlueprintError, issue a log line and do not requeue", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) + mockRepo := NewMockBlueprintSpecRepository(t) + mockRepo.EXPECT().ListIds(ctx).Return(idList, nil) - intermediateErr := &domain.InvalidBlueprintError{ - WrappedError: assert.AnError, - Message: "a generic oh-noez", - } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + reconciler := &BlueprintReconciler{blueprintRepo: mockRepo, namespace: "test-namespace"} + result := reconciler.getBlueprintRequest(ctx) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + expected := []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: "test-blueprint", + Namespace: "test-namespace", + }, + }} - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{}, actual) - assert.Contains(t, logSinkMock.output, "0: Blueprint is invalid, therefore there will be no further evaluation.") + assert.Equal(t, expected, result) }) - t.Run("should catch wrapped StateDiffNotEmptyError, issue a log line and requeue timely", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) - intermediateErr := &domain.StateDiffNotEmptyError{ - Message: "a generic oh-noez", - } - errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + t.Run("no reconcile request on error from repository", func(t *testing.T) { + mockRepo := NewMockBlueprintSpecRepository(t) + mockRepo.EXPECT().ListIds(ctx).Return(nil, errors.New("repo error")) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + reconciler := &BlueprintReconciler{blueprintRepo: mockRepo} + result := reconciler.getBlueprintRequest(ctx) - // then - require.NoError(t, err) - assert.Equal(t, ctrl.Result{RequeueAfter: 1 * time.Second}, actual) - assert.Contains(t, logSinkMock.output, "0: requeue until state diff is empty") + assert.Nil(t, result) }) - t.Run("should catch general errors, issue a log line and return requeue with error", func(t *testing.T) { - // given - logSinkMock := newTrivialTestLogSink() - testLogger := logr.New(logSinkMock) - errorChain := fmt.Errorf("everything goes down the drain: %w", assert.AnError) + t.Run("no reconcile request when no blueprints", func(t *testing.T) { + mockRepo := NewMockBlueprintSpecRepository(t) + mockRepo.EXPECT().ListIds(ctx).Return([]string{}, nil) - // when - actual, err := decideRequeueForError(testLogger, errorChain) + reconciler := &BlueprintReconciler{blueprintRepo: mockRepo} + result := reconciler.getBlueprintRequest(ctx) - // then - require.Error(t, err) - assert.Equal(t, ctrl.Result{}, actual) - assert.Contains(t, logSinkMock.output, "0: An unknown error type occurred. 
Retry with default backoff") + assert.Nil(t, result) }) -} -type testLogSink struct { - output []string - r logr.RuntimeInfo -} + t.Run("no reconcile request when multiple blueprints", func(t *testing.T) { + idList := []string{"bp1", "bp2"} -func newTrivialTestLogSink() *testLogSink { - var output []string - return &testLogSink{output: output, r: logr.RuntimeInfo{CallDepth: 1}} -} + mockRepo := NewMockBlueprintSpecRepository(t) + mockRepo.EXPECT().ListIds(ctx).Return(idList, nil) -func (t *testLogSink) doLog(level int, msg string, _ ...interface{}) { - t.output = append(t.output, fmt.Sprintf("%d: %s", level, msg)) -} -func (t *testLogSink) Init(info logr.RuntimeInfo) { t.r = info } -func (t *testLogSink) Enabled(int) bool { return true } -func (t *testLogSink) Info(level int, msg string, keysAndValues ...interface{}) { - t.doLog(level, msg, keysAndValues...) -} -func (t *testLogSink) Error(err error, msg string, keysAndValues ...interface{}) { - t.doLog(0, msg, append(keysAndValues, err)...) + reconciler := &BlueprintReconciler{blueprintRepo: mockRepo} + result := reconciler.getBlueprintRequest(ctx) + + assert.Nil(t, result) + }) } -func (t *testLogSink) WithValues(...interface{}) logr.LogSink { return t } -func (t *testLogSink) WithName(string) logr.LogSink { return t } diff --git a/pkg/adapter/reconciler/error_handler.go b/pkg/adapter/reconciler/error_handler.go new file mode 100644 index 00000000..a74446a7 --- /dev/null +++ b/pkg/adapter/reconciler/error_handler.go @@ -0,0 +1,108 @@ +package reconciler + +import ( + "errors" + "fmt" + "time" + + "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain" + "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" +) + +// ErrorHandler handles different types of errors and determines the appropriate requeue strategy. +type ErrorHandler struct{} + +// NewErrorHandler creates a new ErrorHandler instance. +func NewErrorHandler() *ErrorHandler { + return &ErrorHandler{} +} + +// handleError processes an error and returns the appropriate reconcile result. 
+func (h *ErrorHandler) handleError(logger logr.Logger, err error) (ctrl.Result, error) { + errLogger := logger.WithValues("error", err) + + var internalError *domainservice.InternalError + var conflictError *domainservice.ConflictError + var notFoundError *domainservice.NotFoundError + var invalidBlueprintError *domain.InvalidBlueprintError + var healthError *domain.UnhealthyEcosystemError + var stateDiffNotEmptyError *domain.StateDiffNotEmptyError + var multipleBlueprintsError *domain.MultipleBlueprintsError + var dogusNotUpToDateError *domain.DogusNotUpToDateError + switch { + case errors.As(err, &internalError): + return h.handleInternalError(errLogger, err) + case errors.As(err, &conflictError): + return h.handleConflictError(errLogger) + case errors.As(err, ¬FoundError): + return h.handleNotFoundError(errLogger, notFoundError) + case errors.As(err, &invalidBlueprintError): + return h.handleInvalidBlueprintError(errLogger) + case errors.As(err, &healthError): + return h.handleHealthError(errLogger) + case errors.As(err, &stateDiffNotEmptyError): + return h.handleStateDiffNotEmptyError(errLogger) + case errors.As(err, &multipleBlueprintsError): + return h.handleMultipleBlueprintsError(errLogger, err) + case errors.As(err, &dogusNotUpToDateError): + return h.handleDogusNotUpToDateError(errLogger, err) + default: + return h.handleUnknownError(errLogger, err) + } +} + +func (h *ErrorHandler) handleInternalError(logger logr.Logger, err error) (ctrl.Result, error) { + logger.Error(err, "An internal error occurred and can maybe be fixed by retrying it later") + return ctrl.Result{}, err // automatic requeue because of non-nil err +} + +func (h *ErrorHandler) handleConflictError(logger logr.Logger) (ctrl.Result, error) { + logger.Info("A concurrent update happened in conflict to the processing of the blueprint spec. A retry could fix this issue") + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil // no error as this would lead to the ignorance of our own retry params +} + +func (h *ErrorHandler) handleNotFoundError(logger logr.Logger, err *domainservice.NotFoundError) (ctrl.Result, error) { + if err.DoNotRetry { + // do not retry in this case, because if f.e. the blueprint is not found, nothing will bring it back, except the + // user, and this would trigger the reconciler by itself. + logger.Error(err, "Did not find resource and a retry is not expected to fix this issue. There will be no further automatic evaluation.") + return ctrl.Result{}, nil + } + logger.Error(err, "Resource was not found, so maybe it was deleted in the meantime. Retry later") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func (h *ErrorHandler) handleInvalidBlueprintError(logger logr.Logger) (ctrl.Result, error) { + logger.Info("Blueprint is invalid, therefore there will be no further evaluation.") + return ctrl.Result{}, nil +} + +func (h *ErrorHandler) handleHealthError(logger logr.Logger) (ctrl.Result, error) { + // really normal case + logger.Info("Ecosystem is unhealthy. 
Retry later") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func (h *ErrorHandler) handleStateDiffNotEmptyError(logger logr.Logger) (ctrl.Result, error) { + logger.Info("requeue until state diff is empty") + // fast requeue here since state diff has to be determined again + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil +} + +func (h *ErrorHandler) handleMultipleBlueprintsError(logger logr.Logger, err error) (ctrl.Result, error) { + logger.Error(err, "Ecosystem contains multiple blueprints - delete all but one. Retry later") + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func (h *ErrorHandler) handleDogusNotUpToDateError(logger logr.Logger, err error) (ctrl.Result, error) { + // really normal case + logger.Info(fmt.Sprintf("Dogus are not up to date yet. Retry later: %s", err.Error())) + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil +} + +func (h *ErrorHandler) handleUnknownError(logger logr.Logger, err error) (ctrl.Result, error) { + logger.Error(err, "An unknown error type occurred. Retry with default backoff") + return ctrl.Result{}, err // automatic requeue because of non-nil err +} diff --git a/pkg/adapter/reconciler/error_handler_test.go b/pkg/adapter/reconciler/error_handler_test.go new file mode 100644 index 00000000..d8609430 --- /dev/null +++ b/pkg/adapter/reconciler/error_handler_test.go @@ -0,0 +1,193 @@ +package reconciler + +import ( + "fmt" + "testing" + "time" + + "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain" + "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + ctrl "sigs.k8s.io/controller-runtime" +) + +func Test_decideRequeueForError(t *testing.T) { + t.Run("should catch wrapped InternalError, issue a log line and requeue with error", func(t *testing.T) { + // given + logSinkMock := newTrivialTestLogSink() + testLogger := logr.New(logSinkMock) + + intermediateErr := domainservice.NewInternalError(assert.AnError, "a generic oh-noez") + errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + + // when + sut := NewErrorHandler() + actual, err := sut.handleError(testLogger, errorChain) + + // then + require.Error(t, err) + assert.Equal(t, ctrl.Result{}, actual) + assert.Contains(t, logSinkMock.output, "0: An internal error occurred and can maybe be fixed by retrying it later") + }) + t.Run("should catch wrapped ConflictError, issue a log line and requeue timely", func(t *testing.T) { + // given + logSinkMock := newTrivialTestLogSink() + testLogger := logr.New(logSinkMock) + + intermediateErr := &domainservice.ConflictError{ + WrappedError: assert.AnError, + Message: "a generic oh-noez", + } + errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + + // when + sut := NewErrorHandler() + actual, err := sut.handleError(testLogger, errorChain) + + // then + require.NoError(t, err) + assert.Equal(t, ctrl.Result{RequeueAfter: 1 * time.Second}, actual) + assert.Contains(t, logSinkMock.output, "0: A concurrent update happened in conflict to the processing of the blueprint spec. 
A retry could fix this issue")
+	})
+	t.Run("should catch wrapped NotFoundError, issue a log line and requeue", func(t *testing.T) {
+		// given
+		logSinkMock := newTrivialTestLogSink()
+		testLogger := logr.New(logSinkMock)
+
+		intermediateErr := &domainservice.NotFoundError{
+			WrappedError: assert.AnError,
+			Message:      "a generic oh-noez",
+		}
+		errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr)
+
+		// when
+		sut := NewErrorHandler()
+		actual, err := sut.handleError(testLogger, errorChain)
+
+		// then
+		require.NoError(t, err)
+		assert.Equal(t, ctrl.Result{RequeueAfter: 10 * time.Second}, actual)
+		assert.Contains(t, logSinkMock.output, "0: Resource was not found, so maybe it was deleted in the meantime. Retry later")
+	})
+	t.Run("should catch wrapped MultipleBlueprintsError, issue an error log line and requeue", func(t *testing.T) {
+		// given
+		logSinkMock := newTrivialTestLogSink()
+		testLogger := logr.New(logSinkMock)
+
+		intermediateErr := &domain.MultipleBlueprintsError{
+			Message: "multiple blueprints found",
+		}
+		errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr)
+
+		// when
+		sut := NewErrorHandler()
+		actual, err := sut.handleError(testLogger, errorChain)
+
+		// then
+		require.NoError(t, err)
+		assert.Equal(t, ctrl.Result{RequeueAfter: 10 * time.Second}, actual)
+		assert.Contains(t, logSinkMock.output, "0: Ecosystem contains multiple blueprints - delete all but one. Retry later")
+	})
+	t.Run("NotFoundError, should not retry if DoNotRetry-Flag is set", func(t *testing.T) {
+		// given
+		logSinkMock := newTrivialTestLogSink()
+		testLogger := logr.New(logSinkMock)
+
+		intermediateErr := &domainservice.NotFoundError{
+			WrappedError: assert.AnError,
+			Message:      "Blueprint does not exist",
+			DoNotRetry:   true,
+		}
+		errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr)
+
+		// when
+		sut := NewErrorHandler()
+		actual, err := sut.handleError(testLogger, errorChain)
+
+		// then
+		require.NoError(t, err)
+		assert.Equal(t, ctrl.Result{}, actual)
+		assert.Contains(t, logSinkMock.output, "0: Did not find resource and a retry is not expected to fix this issue.
There will be no further automatic evaluation.") + }) + t.Run("should catch wrapped InvalidBlueprintError, issue a log line and do not requeue", func(t *testing.T) { + // given + logSinkMock := newTrivialTestLogSink() + testLogger := logr.New(logSinkMock) + + intermediateErr := &domain.InvalidBlueprintError{ + WrappedError: assert.AnError, + Message: "a generic oh-noez", + } + errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + + // when + sut := NewErrorHandler() + actual, err := sut.handleError(testLogger, errorChain) + + // then + require.NoError(t, err) + assert.Equal(t, ctrl.Result{}, actual) + assert.Contains(t, logSinkMock.output, "0: Blueprint is invalid, therefore there will be no further evaluation.") + }) + t.Run("should catch wrapped StateDiffNotEmptyError, issue a log line and requeue timely", func(t *testing.T) { + // given + logSinkMock := newTrivialTestLogSink() + testLogger := logr.New(logSinkMock) + + intermediateErr := &domain.StateDiffNotEmptyError{ + Message: "a generic oh-noez", + } + errorChain := fmt.Errorf("could not do the thing: %w", intermediateErr) + + // when + sut := NewErrorHandler() + actual, err := sut.handleError(testLogger, errorChain) + + // then + require.NoError(t, err) + assert.Equal(t, ctrl.Result{RequeueAfter: 1 * time.Second}, actual) + assert.Contains(t, logSinkMock.output, "0: requeue until state diff is empty") + }) + t.Run("should catch general errors, issue a log line and return requeue with error", func(t *testing.T) { + // given + logSinkMock := newTrivialTestLogSink() + testLogger := logr.New(logSinkMock) + + errorChain := fmt.Errorf("everything goes down the drain: %w", assert.AnError) + + // when + sut := NewErrorHandler() + actual, err := sut.handleError(testLogger, errorChain) + + // then + require.Error(t, err) + assert.Equal(t, ctrl.Result{}, actual) + assert.Contains(t, logSinkMock.output, "0: An unknown error type occurred. Retry with default backoff") + }) +} + +type testLogSink struct { + output []string + r logr.RuntimeInfo +} + +func newTrivialTestLogSink() *testLogSink { + var output []string + return &testLogSink{output: output, r: logr.RuntimeInfo{CallDepth: 1}} +} + +func (t *testLogSink) doLog(level int, msg string, _ ...interface{}) { + t.output = append(t.output, fmt.Sprintf("%d: %s", level, msg)) +} +func (t *testLogSink) Init(info logr.RuntimeInfo) { t.r = info } +func (t *testLogSink) Enabled(int) bool { return true } +func (t *testLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + t.doLog(level, msg, keysAndValues...) +} +func (t *testLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + t.doLog(0, msg, append(keysAndValues, err)...) 
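+	// entries take the form "<level>: <message>", which is what the
+	// assert.Contains checks in the tests above match against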
+} +func (t *testLogSink) WithValues(...interface{}) logr.LogSink { return t } +func (t *testLogSink) WithName(string) logr.LogSink { return t } diff --git a/pkg/adapter/reconciler/interfaces.go b/pkg/adapter/reconciler/interfaces.go index aacc651c..1b782659 100644 --- a/pkg/adapter/reconciler/interfaces.go +++ b/pkg/adapter/reconciler/interfaces.go @@ -3,6 +3,7 @@ package reconciler import ( "context" + "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -18,3 +19,7 @@ type BlueprintChangeHandler interface { HandleUntilApplied(ctx context.Context, blueprintId string) error CheckForMultipleBlueprintResources(ctx context.Context) error } + +type BlueprintSpecRepository interface { + domainservice.BlueprintSpecRepository +} diff --git a/pkg/adapter/reconciler/mock_BlueprintSpecRepository_test.go b/pkg/adapter/reconciler/mock_BlueprintSpecRepository_test.go new file mode 100644 index 00000000..1e86ce6f --- /dev/null +++ b/pkg/adapter/reconciler/mock_BlueprintSpecRepository_test.go @@ -0,0 +1,258 @@ +// Code generated by mockery v2.53.3. DO NOT EDIT. + +package reconciler + +import ( + context "context" + + domain "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain" + mock "github.com/stretchr/testify/mock" +) + +// MockBlueprintSpecRepository is an autogenerated mock type for the BlueprintSpecRepository type +type MockBlueprintSpecRepository struct { + mock.Mock +} + +type MockBlueprintSpecRepository_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBlueprintSpecRepository) EXPECT() *MockBlueprintSpecRepository_Expecter { + return &MockBlueprintSpecRepository_Expecter{mock: &_m.Mock} +} + +// Count provides a mock function with given fields: ctx, limit +func (_m *MockBlueprintSpecRepository) Count(ctx context.Context, limit int) (int, error) { + ret := _m.Called(ctx, limit) + + if len(ret) == 0 { + panic("no return value specified for Count") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok { + return rf(ctx, limit) + } + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, limit) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBlueprintSpecRepository_Count_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Count' +type MockBlueprintSpecRepository_Count_Call struct { + *mock.Call +} + +// Count is a helper method to define mock.On call +// - ctx context.Context +// - limit int +func (_e *MockBlueprintSpecRepository_Expecter) Count(ctx interface{}, limit interface{}) *MockBlueprintSpecRepository_Count_Call { + return &MockBlueprintSpecRepository_Count_Call{Call: _e.mock.On("Count", ctx, limit)} +} + +func (_c *MockBlueprintSpecRepository_Count_Call) Run(run func(ctx context.Context, limit int)) *MockBlueprintSpecRepository_Count_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int)) + }) + return _c +} + +func (_c *MockBlueprintSpecRepository_Count_Call) Return(_a0 int, _a1 error) *MockBlueprintSpecRepository_Count_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBlueprintSpecRepository_Count_Call) RunAndReturn(run func(context.Context, int) (int, error)) *MockBlueprintSpecRepository_Count_Call { + _c.Call.Return(run) + return _c +} + +// GetById provides a mock function with given fields: ctx, 
blueprintId +func (_m *MockBlueprintSpecRepository) GetById(ctx context.Context, blueprintId string) (*domain.BlueprintSpec, error) { + ret := _m.Called(ctx, blueprintId) + + if len(ret) == 0 { + panic("no return value specified for GetById") + } + + var r0 *domain.BlueprintSpec + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*domain.BlueprintSpec, error)); ok { + return rf(ctx, blueprintId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *domain.BlueprintSpec); ok { + r0 = rf(ctx, blueprintId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*domain.BlueprintSpec) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, blueprintId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBlueprintSpecRepository_GetById_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetById' +type MockBlueprintSpecRepository_GetById_Call struct { + *mock.Call +} + +// GetById is a helper method to define mock.On call +// - ctx context.Context +// - blueprintId string +func (_e *MockBlueprintSpecRepository_Expecter) GetById(ctx interface{}, blueprintId interface{}) *MockBlueprintSpecRepository_GetById_Call { + return &MockBlueprintSpecRepository_GetById_Call{Call: _e.mock.On("GetById", ctx, blueprintId)} +} + +func (_c *MockBlueprintSpecRepository_GetById_Call) Run(run func(ctx context.Context, blueprintId string)) *MockBlueprintSpecRepository_GetById_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockBlueprintSpecRepository_GetById_Call) Return(_a0 *domain.BlueprintSpec, _a1 error) *MockBlueprintSpecRepository_GetById_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBlueprintSpecRepository_GetById_Call) RunAndReturn(run func(context.Context, string) (*domain.BlueprintSpec, error)) *MockBlueprintSpecRepository_GetById_Call { + _c.Call.Return(run) + return _c +} + +// ListIds provides a mock function with given fields: ctx +func (_m *MockBlueprintSpecRepository) ListIds(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ListIds") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBlueprintSpecRepository_ListIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListIds' +type MockBlueprintSpecRepository_ListIds_Call struct { + *mock.Call +} + +// ListIds is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockBlueprintSpecRepository_Expecter) ListIds(ctx interface{}) *MockBlueprintSpecRepository_ListIds_Call { + return &MockBlueprintSpecRepository_ListIds_Call{Call: _e.mock.On("ListIds", ctx)} +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) Run(run func(ctx context.Context)) *MockBlueprintSpecRepository_ListIds_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) Return(_a0 []string, _a1 error) *MockBlueprintSpecRepository_ListIds_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) RunAndReturn(run func(context.Context) ([]string, error)) *MockBlueprintSpecRepository_ListIds_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, blueprintSpec +func (_m *MockBlueprintSpecRepository) Update(ctx context.Context, blueprintSpec *domain.BlueprintSpec) error { + ret := _m.Called(ctx, blueprintSpec) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *domain.BlueprintSpec) error); ok { + r0 = rf(ctx, blueprintSpec) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBlueprintSpecRepository_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockBlueprintSpecRepository_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - blueprintSpec *domain.BlueprintSpec +func (_e *MockBlueprintSpecRepository_Expecter) Update(ctx interface{}, blueprintSpec interface{}) *MockBlueprintSpecRepository_Update_Call { + return &MockBlueprintSpecRepository_Update_Call{Call: _e.mock.On("Update", ctx, blueprintSpec)} +} + +func (_c *MockBlueprintSpecRepository_Update_Call) Run(run func(ctx context.Context, blueprintSpec *domain.BlueprintSpec)) *MockBlueprintSpecRepository_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*domain.BlueprintSpec)) + }) + return _c +} + +func (_c *MockBlueprintSpecRepository_Update_Call) Return(_a0 error) *MockBlueprintSpecRepository_Update_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBlueprintSpecRepository_Update_Call) RunAndReturn(run func(context.Context, *domain.BlueprintSpec) error) *MockBlueprintSpecRepository_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewMockBlueprintSpecRepository creates a new instance of MockBlueprintSpecRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockBlueprintSpecRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBlueprintSpecRepository { + mock := &MockBlueprintSpecRepository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/adapter/reconciler/predicate_factory.go b/pkg/adapter/reconciler/predicate_factory.go new file mode 100644 index 00000000..0f39999e --- /dev/null +++ b/pkg/adapter/reconciler/predicate_factory.go @@ -0,0 +1,66 @@ +package reconciler + +import ( + "time" + + v2 "github.com/cloudogu/k8s-dogu-lib/v2/api/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func makeResourcePredicate[T client.Object](resourceFunc func(client.Object) bool) predicate.TypedFuncs[T] { + return predicate.TypedFuncs[T]{ + CreateFunc: func(event.TypedCreateEvent[T]) bool { return false }, // ignore creates + UpdateFunc: func(e event.TypedUpdateEvent[T]) bool { return resourceFunc(e.ObjectNew) }, + DeleteFunc: func(e event.TypedDeleteEvent[T]) bool { return resourceFunc(e.Object) }, + GenericFunc: func(event.TypedGenericEvent[T]) bool { return false }, // ignore generics + } +} + +func makeContentPredicate[T client.Object]( + debounce *SingletonDebounce, + window time.Duration, + changed func(oldObj, newObj T) bool, +) predicate.TypedFuncs[T] { + return predicate.TypedFuncs[T]{ + CreateFunc: func(event.TypedCreateEvent[T]) bool { return false }, + UpdateFunc: func(e event.TypedUpdateEvent[T]) bool { + if !changed(e.ObjectOld, e.ObjectNew) { + return false + } + return debounce.AllowOrMark(window) + }, + DeleteFunc: func(event.TypedDeleteEvent[T]) bool { return debounce.AllowOrMark(window) }, // reconcile on delete + GenericFunc: func(event.TypedGenericEvent[T]) bool { return false }, + } +} + +func configMapContentChanged(oldCM, newCM *corev1.ConfigMap) bool { + return !equality.Semantic.DeepEqual(oldCM.Data, newCM.Data) || + !equality.Semantic.DeepEqual(oldCM.BinaryData, newCM.BinaryData) +} + +func secretContentChanged(oldS, newS *corev1.Secret) bool { + return !equality.Semantic.DeepEqual(oldS.Data, newS.Data) || + !equality.Semantic.DeepEqual(oldS.Immutable, newS.Immutable) +} + +func doguSpecChanged(oldObj, newObj *v2.Dogu) bool { + return !equality.Semantic.DeepEqual(oldObj.Spec, newObj.Spec) +} + +func hasCesLabel(o client.Object) bool { + // Consider only CES ConfigMaps that are doguConfig or globalConfig + return o.GetLabels()["app"] == "ces" && (o.GetLabels()["dogu.name"] != "" || o.GetLabels()["k8s.cloudogu.com/type"] == "global-config") +} + +func hasNotDoguDescriptorLabel(o client.Object) bool { + return o.GetLabels()["k8s.cloudogu.com/type"] != "local-dogu-registry" +} + +func (r *BlueprintReconciler) hasOperatorNamespace(o client.Object) bool { + return r.namespace == "" || o.GetNamespace() == r.namespace +} diff --git a/pkg/adapter/reconciler/predicate_factory_test.go b/pkg/adapter/reconciler/predicate_factory_test.go new file mode 100644 index 00000000..0b8a2ea6 --- /dev/null +++ b/pkg/adapter/reconciler/predicate_factory_test.go @@ -0,0 +1,436 @@ +package reconciler + +import ( + "testing" + "time" + + v2 "github.com/cloudogu/k8s-dogu-lib/v2/api/v2" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func 
TestMakeResourcePredicate(t *testing.T) { + filterFunc := func(o client.Object) bool { + return o.GetNamespace() == "test-namespace" + } + + predicate := makeResourcePredicate[*corev1.ConfigMap](filterFunc) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "test-namespace", + }, + } + + cmWrongNS := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "wrong-namespace", + }, + } + + t.Run("CreateFunc should return false", func(t *testing.T) { + result := predicate.CreateFunc(event.TypedCreateEvent[*corev1.ConfigMap]{ + Object: cm, + }) + assert.False(t, result) + }) + + t.Run("UpdateFunc should use filter function", func(t *testing.T) { + result := predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cm, + ObjectNew: cm, + }) + assert.True(t, result) + + result = predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cmWrongNS, + ObjectNew: cmWrongNS, + }) + assert.False(t, result) + }) + + t.Run("DeleteFunc should use filter function", func(t *testing.T) { + result := predicate.DeleteFunc(event.TypedDeleteEvent[*corev1.ConfigMap]{ + Object: cm, + }) + assert.True(t, result) + + result = predicate.DeleteFunc(event.TypedDeleteEvent[*corev1.ConfigMap]{ + Object: cmWrongNS, + }) + assert.False(t, result) + }) + + t.Run("GenericFunc should return false", func(t *testing.T) { + result := predicate.GenericFunc(event.TypedGenericEvent[*corev1.ConfigMap]{ + Object: cm, + }) + assert.False(t, result) + }) +} + +func TestMakeContentPredicate(t *testing.T) { + debounce := &SingletonDebounce{} + window := 50 * time.Millisecond + + changeFunc := func(old, new *corev1.ConfigMap) bool { + return old.Data["key"] != new.Data["key"] + } + + predicate := makeContentPredicate(debounce, window, changeFunc) + + cmOld := &corev1.ConfigMap{ + Data: map[string]string{"key": "old-value"}, + } + + cmNew := &corev1.ConfigMap{ + Data: map[string]string{"key": "new-value"}, + } + + cmSame := &corev1.ConfigMap{ + Data: map[string]string{"key": "old-value"}, + } + + t.Run("CreateFunc should return false", func(t *testing.T) { + result := predicate.CreateFunc(event.TypedCreateEvent[*corev1.ConfigMap]{ + Object: cmNew, + }) + assert.False(t, result) + }) + + t.Run("UpdateFunc should handle content changes with debouncing", func(t *testing.T) { + // Reset debounce + debounce = &SingletonDebounce{} + predicate = makeContentPredicate(debounce, window, changeFunc) + + // First change should be allowed + result := predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cmOld, + ObjectNew: cmNew, + }) + assert.True(t, result) + + // Immediate second change should be debounced + result = predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cmOld, + ObjectNew: cmNew, + }) + assert.False(t, result) + }) + + t.Run("UpdateFunc should return false on no content changes", func(t *testing.T) { + // Reset debounce + debounce = &SingletonDebounce{} + predicate = makeContentPredicate(debounce, window, changeFunc) + + // No change should return false + result := predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cmOld, + ObjectNew: cmSame, + }) + assert.False(t, result) + + // Immediate second change should be allowed + result = predicate.UpdateFunc(event.TypedUpdateEvent[*corev1.ConfigMap]{ + ObjectOld: cmOld, + ObjectNew: cmNew, + }) + assert.True(t, result) + }) + + t.Run("DeleteFunc should trigger debouncing", func(t *testing.T) { + // Reset 
debounce + debounce = &SingletonDebounce{} + predicate = makeContentPredicate(debounce, window, changeFunc) + + result := predicate.DeleteFunc(event.TypedDeleteEvent[*corev1.ConfigMap]{ + Object: cmOld, + }) + assert.True(t, result) + + // Immediate second change should be debounced + result = predicate.DeleteFunc(event.TypedDeleteEvent[*corev1.ConfigMap]{ + Object: cmOld, + }) + assert.False(t, result) + }) + + t.Run("GenericFunc should return false", func(t *testing.T) { + result := predicate.GenericFunc(event.TypedGenericEvent[*corev1.ConfigMap]{ + Object: cmOld, + }) + assert.False(t, result) + }) +} + +func TestConfigMapContentChanged(t *testing.T) { + tests := []struct { + name string + oldCM *corev1.ConfigMap + newCM *corev1.ConfigMap + expected bool + }{ + { + name: "data changed", + oldCM: &corev1.ConfigMap{ + Data: map[string]string{"key": "old"}, + }, + newCM: &corev1.ConfigMap{ + Data: map[string]string{"key": "new"}, + }, + expected: true, + }, + { + name: "binary data changed", + oldCM: &corev1.ConfigMap{ + BinaryData: map[string][]byte{"key": []byte("old")}, + }, + newCM: &corev1.ConfigMap{ + BinaryData: map[string][]byte{"key": []byte("new")}, + }, + expected: true, + }, + { + name: "no change", + oldCM: &corev1.ConfigMap{ + Data: map[string]string{"key": "same"}, + }, + newCM: &corev1.ConfigMap{ + Data: map[string]string{"key": "same"}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := configMapContentChanged(tt.oldCM, tt.newCM) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestSecretContentChanged(t *testing.T) { + immutableTrue := true + immutableFalse := false + + tests := []struct { + name string + oldSecret *corev1.Secret + newSecret *corev1.Secret + expected bool + }{ + { + name: "data changed", + oldSecret: &corev1.Secret{ + Data: map[string][]byte{"key": []byte("old")}, + }, + newSecret: &corev1.Secret{ + Data: map[string][]byte{"key": []byte("new")}, + }, + expected: true, + }, + { + name: "immutable changed", + oldSecret: &corev1.Secret{ + Immutable: &immutableTrue, + }, + newSecret: &corev1.Secret{ + Immutable: &immutableFalse, + }, + expected: true, + }, + { + name: "no change", + oldSecret: &corev1.Secret{ + Data: map[string][]byte{"key": []byte("same")}, + }, + newSecret: &corev1.Secret{ + Data: map[string][]byte{"key": []byte("same")}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := secretContentChanged(tt.oldSecret, tt.newSecret) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestDoguSpecChanged(t *testing.T) { + tests := []struct { + name string + oldDogu *v2.Dogu + newDogu *v2.Dogu + expected bool + }{ + { + name: "spec changed", + oldDogu: &v2.Dogu{ + Spec: v2.DoguSpec{ + Name: "old-name", + Version: "1.2.3-1", + }, + }, + newDogu: &v2.Dogu{ + Spec: v2.DoguSpec{ + Name: "new-name", + Version: "1.2.3-2", + }, + }, + expected: true, + }, + { + name: "no change", + oldDogu: &v2.Dogu{ + Spec: v2.DoguSpec{ + Name: "same-name", + Version: "1.2.3-1", + }, + }, + newDogu: &v2.Dogu{ + Spec: v2.DoguSpec{ + Name: "same-name", + Version: "1.2.3-1", + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := doguSpecChanged(tt.oldDogu, tt.newDogu) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestHasCesLabel(t *testing.T) { + tests := []struct { + name string + obj client.Object + expected bool + }{ + { + name: "has ces dogu config labels", + 
obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app":       "ces",
+						"dogu.name": "test-dogu",
+					},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "missing app label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"dogu.name": "test-dogu",
+					},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "missing dogu.name label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "ces",
+					},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "wrong app label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app":       "other",
+						"dogu.name": "test-dogu",
+					},
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := hasCesLabel(tt.obj)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestHasNotDoguDescriptorLabel(t *testing.T) {
+	tests := []struct {
+		name     string
+		obj      client.Object
+		expected bool
+	}{
+		{
+			name: "no type label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "different type label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"k8s.cloudogu.com/type": "other-type",
+					},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "has dogu descriptor label",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"k8s.cloudogu.com/type": "local-dogu-registry",
+					},
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := hasNotDoguDescriptorLabel(tt.obj)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
diff --git a/pkg/adapter/reconciler/reconcile_debounce.go b/pkg/adapter/reconciler/reconcile_debounce.go
new file mode 100644
index 00000000..a1ef8317
--- /dev/null
+++ b/pkg/adapter/reconciler/reconcile_debounce.go
@@ -0,0 +1,50 @@
+package reconciler
+
+import (
+	"sync"
+	"time"
+)
+
+// SingletonDebounce debounces reconciliation events triggered by cluster resources other than the blueprint, to avoid
+// too many unnecessary reconciliations.
+type SingletonDebounce struct {
+	mu      sync.Mutex
+	next    time.Time // when the next event is allowed
+	pending bool      // true if something arrived during cooldown
+}
+
+// AllowOrMark returns true if we should enqueue now.
+// If we're still in cooldown, it marks pending and returns false.
+func (d *SingletonDebounce) AllowOrMark(window time.Duration) bool {
+	now := time.Now()
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if now.Before(d.next) {
+		d.pending = true
+		return false
+	}
+	d.next = now.Add(window)
+	d.pending = false
+	return true
+}
+
+// ShouldRequeue tells if we should do a trailing reconciliation after cooldown.
+// Returns (ShouldRequeue, remainingCooldown).
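+//
+// Sketch of the intended caller pattern (illustrative only; ctrl.Result comes
+// from controller-runtime, and "debounce" is assumed to be the reconciler's
+// shared *SingletonDebounce):
+//
+//	if requeue, remaining := debounce.ShouldRequeue(); requeue {
+//		return ctrl.Result{RequeueAfter: remaining}, nil
+//	}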
+func (d *SingletonDebounce) ShouldRequeue() (bool, time.Duration) { + now := time.Now() + + d.mu.Lock() + defer d.mu.Unlock() + + if !d.pending { + return false, 0 + } + remaining := time.Duration(0) + if now.Before(d.next) { + remaining = d.next.Sub(now) + } + d.pending = false + return true, remaining +} diff --git a/pkg/adapter/reconciler/reconcile_debounce_test.go b/pkg/adapter/reconciler/reconcile_debounce_test.go new file mode 100644 index 00000000..fcb43f3a --- /dev/null +++ b/pkg/adapter/reconciler/reconcile_debounce_test.go @@ -0,0 +1,115 @@ +package reconciler + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestSingletonDebounce_AllowOrMark(t *testing.T) { + t.Run("first call should allow", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + window := 100 * time.Millisecond + // when + allowed := debounce.AllowOrMark(window) + // then + assert.True(t, allowed) + }) + + t.Run("immediate second call should mark pending", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + window := 100 * time.Millisecond + allowed := debounce.AllowOrMark(window) + assert.True(t, allowed) + // when + allowed = debounce.AllowOrMark(window) + // then + assert.False(t, allowed) + assert.True(t, debounce.pending) + }) + + t.Run("call after window should allow", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + window := 100 * time.Millisecond + allowed := debounce.AllowOrMark(window) + assert.True(t, allowed) + // when + time.Sleep(window + 10*time.Millisecond) // wait for window to pass + allowed = debounce.AllowOrMark(window) + // then + assert.True(t, allowed) + assert.False(t, debounce.pending) + }) +} + +func TestSingletonDebounce_ShouldRequeue(t *testing.T) { + t.Run("no pending should not requeue", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + // when + shouldRequeue, remaining := debounce.ShouldRequeue() + // then + assert.False(t, shouldRequeue) + assert.Equal(t, time.Duration(0), remaining) + }) + + t.Run("with pending should requeue", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + window := 100 * time.Millisecond + // Mark as pending + debounce.AllowOrMark(window) + debounce.AllowOrMark(window) // This should mark pending + // when + shouldRequeue, remaining := debounce.ShouldRequeue() + // then + assert.True(t, shouldRequeue) + assert.True(t, remaining > 0) + assert.False(t, debounce.pending) // Should clear pending + }) + + t.Run("after window expired should requeue with no remaining time", func(t *testing.T) { + // given + debounce := &SingletonDebounce{} + window := 100 * time.Millisecond + // Mark as pending and wait + debounce.AllowOrMark(window) + debounce.AllowOrMark(window) // This should mark pending + time.Sleep(window + 10*time.Millisecond) + // when + shouldRequeue, remaining := debounce.ShouldRequeue() + // then + assert.True(t, shouldRequeue) + assert.Equal(t, time.Duration(0), remaining) + assert.False(t, debounce.pending) // Should clear pending + }) +} + +func TestSingletonDebounce_Concurrency(t *testing.T) { + debounce := &SingletonDebounce{} + window := 50 * time.Millisecond + + // Test that multiple goroutines can safely access the debounce + done := make(chan bool, 10) + + for i := 0; i < 10; i++ { + go func() { + debounce.AllowOrMark(window) + debounce.ShouldRequeue() + done <- true + }() + } + + // Wait for all goroutines to complete + for i := 0; i < 10; i++ { + select { + case <-done: + case <-time.After(1 * time.Second): + 
t.Fatal("Test timed out - possible deadlock") + } + } +} diff --git a/pkg/application/doguInstallationUseCase.go b/pkg/application/doguInstallationUseCase.go index 99ec0f68..405b9a5f 100644 --- a/pkg/application/doguInstallationUseCase.go +++ b/pkg/application/doguInstallationUseCase.go @@ -9,6 +9,7 @@ import ( "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domain/ecosystem" "github.com/cloudogu/k8s-blueprint-operator/v2/pkg/domainservice" "golang.org/x/exp/maps" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -18,23 +19,26 @@ const noDowngradesExplanationTextFmt = "downgrades are not allowed as the data m "If you want an 'allow-downgrades' flag, issue a feature request" type DoguInstallationUseCase struct { - blueprintSpecRepo blueprintSpecRepository - doguRepo doguInstallationRepository - doguConfigRepo doguConfigRepository - globalConfigRepo globalConfigRepository + blueprintSpecRepo blueprintSpecRepository + doguRepo doguInstallationRepository + globalConfigRepo globalConfigRepository + doguConfigRepo doguConfigRepository + sensitiveDoguConfigRepo sensitiveDoguConfigRepository } func NewDoguInstallationUseCase( blueprintSpecRepo domainservice.BlueprintSpecRepository, doguRepo domainservice.DoguInstallationRepository, - doguConfigRepo doguConfigRepository, globalConfigRepo globalConfigRepository, + doguConfigRepo doguConfigRepository, + sensitiveDoguConfigRepo sensitiveDoguConfigRepository, ) *DoguInstallationUseCase { return &DoguInstallationUseCase{ - blueprintSpecRepo: blueprintSpecRepo, - doguRepo: doguRepo, - doguConfigRepo: doguConfigRepo, - globalConfigRepo: globalConfigRepo, + blueprintSpecRepo: blueprintSpecRepo, + doguRepo: doguRepo, + globalConfigRepo: globalConfigRepo, + doguConfigRepo: doguConfigRepo, + sensitiveDoguConfigRepo: sensitiveDoguConfigRepo, } } @@ -72,12 +76,12 @@ func (useCase *DoguInstallationUseCase) CheckDogusUpToDate(ctx context.Context) continue } - doguConfig, err := useCase.doguConfigRepo.Get(ctx, doguName) + doguConfigUpdateTime, sensitiveDoguConfigUpdateTime, err := useCase.getDoguConfigUpdateTimes(ctx, doguName) if err != nil { return nil, err } - doguConfigUpdateTime := doguConfig.LastUpdated - configUpToDate := dogu.IsConfigUpToDate(globalConfigUpdateTime, doguConfigUpdateTime) + + configUpToDate := dogu.IsConfigUpToDate(globalConfigUpdateTime, doguConfigUpdateTime, sensitiveDoguConfigUpdateTime) if !configUpToDate { dogusNotUpToDate = append(dogusNotUpToDate, doguName) continue @@ -87,6 +91,21 @@ func (useCase *DoguInstallationUseCase) CheckDogusUpToDate(ctx context.Context) return dogusNotUpToDate, nil } +func (useCase *DoguInstallationUseCase) getDoguConfigUpdateTimes(ctx context.Context, doguName cescommons.SimpleName) (*metav1.Time, *metav1.Time, error) { + doguConfig, err := useCase.doguConfigRepo.Get(ctx, doguName) + if err != nil { + return nil, nil, err + } + doguConfigUpdateTime := doguConfig.LastUpdated + + sensitveDoguConfig, err := useCase.sensitiveDoguConfigRepo.Get(ctx, doguName) + if err != nil { + return nil, nil, err + } + sensitiveDoguConfigUpdateTime := sensitveDoguConfig.LastUpdated + return doguConfigUpdateTime, sensitiveDoguConfigUpdateTime, nil +} + // ApplyDoguStates applies the expected dogu state from the Blueprint to the ecosystem. // Fail-fast here, so that the possible damage is as small as possible. 
func (useCase *DoguInstallationUseCase) ApplyDoguStates(ctx context.Context, blueprint *domain.BlueprintSpec) error { diff --git a/pkg/application/doguInstallationUseCase_test.go b/pkg/application/doguInstallationUseCase_test.go index ea06a58c..66db39f2 100644 --- a/pkg/application/doguInstallationUseCase_test.go +++ b/pkg/application/doguInstallationUseCase_test.go @@ -45,7 +45,7 @@ var ( func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { t.Run("action none", func(t *testing.T) { // given - sut := NewDoguInstallationUseCase(nil, nil, nil, nil) + sut := NewDoguInstallationUseCase(nil, nil, nil, nil, nil) // when err := sut.applyDoguState(testCtx, domain.DoguDiff{ @@ -93,7 +93,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { ecosystem.InstallDogu(postgresqlQualifiedName, &version3211, &volumeSize, proxyConfig, additionalMounts)). Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -136,7 +136,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { Delete(testCtx, cescommons.SimpleName("postgresql")). Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -159,7 +159,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { t.Run("action uninstall throws NotFoundError when dogu not found", func(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -188,7 +188,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { Update(testCtx, dogu). 
Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) dogu.PauseReconciliation = true // test if it gets reset on update (the dogu in the EXPECT Update call has this to false) @@ -218,7 +218,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { Version: version3212, } - sut := NewDoguInstallationUseCase(nil, nil, nil, nil) + sut := NewDoguInstallationUseCase(nil, nil, nil, nil, nil) // when err := sut.applyDoguState( @@ -255,7 +255,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -295,7 +295,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -334,7 +334,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -373,7 +373,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -456,7 +456,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState(testCtx, diff, dogu, domain.BlueprintConfiguration{}) @@ -495,7 +495,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, expectedDogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -531,7 +531,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { Version: version3212, } - sut := NewDoguInstallationUseCase(nil, nil, nil, nil) + sut := NewDoguInstallationUseCase(nil, nil, nil, nil, nil) // when err := sut.applyDoguState( @@ -561,7 +561,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().Update(testCtx, dogu).Return(nil) - sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil) + sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil) // when err := sut.applyDoguState( @@ -586,7 +586,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) { t.Run("unknown action", func(t 
*testing.T) {
 		// given
-		sut := NewDoguInstallationUseCase(nil, nil, nil, nil)
+		sut := NewDoguInstallationUseCase(nil, nil, nil, nil, nil)
 
 		// when
 		err := sut.applyDoguState(
@@ -608,7 +608,7 @@ func TestDoguInstallationUseCase_applyDoguState(t *testing.T) {
 	t.Run("should no fail with no actions", func(t *testing.T) {
 		// given
-		sut := NewDoguInstallationUseCase(nil, nil, nil, nil)
+		sut := NewDoguInstallationUseCase(nil, nil, nil, nil, nil)
 
 		// when
 		err := sut.applyDoguState(
@@ -635,7 +635,7 @@ func TestDoguInstallationUseCase_ApplyDoguStates(t *testing.T) {
 		doguRepoMock := newMockDoguInstallationRepository(t)
 		doguRepoMock.EXPECT().GetAll(testCtx).Return(nil, assert.AnError)
 
-		sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil)
+		sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil)
 
 		// when
 		err := sut.ApplyDoguStates(testCtx, &domain.BlueprintSpec{})
@@ -663,7 +663,7 @@ func TestDoguInstallationUseCase_ApplyDoguStates(t *testing.T) {
 		doguRepoMock := newMockDoguInstallationRepository(t)
 		doguRepoMock.EXPECT().GetAll(testCtx).Return(map[cescommons.SimpleName]*ecosystem.DoguInstallation{}, nil)
 
-		sut := NewDoguInstallationUseCase(blueprintSpecRepoMock, doguRepoMock, nil, nil)
+		sut := NewDoguInstallationUseCase(blueprintSpecRepoMock, doguRepoMock, nil, nil, nil)
 
 		// when
 		err := sut.ApplyDoguStates(testCtx, blueprint)
@@ -695,7 +695,7 @@ func TestDoguInstallationUseCase_ApplyDoguStates(t *testing.T) {
 			},
 		}, nil)
 
-		sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil)
+		sut := NewDoguInstallationUseCase(nil, doguRepoMock, nil, nil, nil)
 
 		// when
 		err := sut.ApplyDoguStates(testCtx, blueprint)
@@ -736,6 +736,7 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) {
 		}
 		globalConfigRepoMock.EXPECT().Get(testCtx).Return(globalConf, nil)
 		doguConfigRepoMock := newMockDoguConfigRepository(t)
+		sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t)
 		postgresDoguConf := config.DoguConfig{
 			DoguName: postgresqlQualifiedName.SimpleName,
 			Config: config.Config{
@@ -750,11 +751,14 @@
 		}
 		doguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(postgresDoguConf, nil)
 		doguConfigRepoMock.EXPECT().Get(testCtx, ldapQualifiedName.SimpleName).Return(ldapDoguConf, nil)
+		sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(postgresDoguConf, nil)
+		sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, ldapQualifiedName.SimpleName).Return(ldapDoguConf, nil)
 
 		useCase := &DoguInstallationUseCase{
-			doguRepo:         doguRepoMock,
-			doguConfigRepo:   doguConfigRepoMock,
-			globalConfigRepo: globalConfigRepoMock,
+			doguRepo:                doguRepoMock,
+			globalConfigRepo:        globalConfigRepoMock,
+			doguConfigRepo:          doguConfigRepoMock,
+			sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock,
 		}
 
 		// when
@@ -816,6 +820,7 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) {
 		}
 		globalConfigRepoMock.EXPECT().Get(testCtx).Return(globalConf, nil)
 		doguConfigRepoMock := newMockDoguConfigRepository(t)
+		sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t)
 		doguConf := config.DoguConfig{
 			DoguName: postgresqlQualifiedName.SimpleName,
 			Config: config.Config{
@@ -823,11 +828,13 @@
 			},
 		}
 		doguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(doguConf, nil)
+		sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(doguConf, nil)
 
 		useCase := &DoguInstallationUseCase{
-			doguRepo:         doguRepoMock,
-			doguConfigRepo:   doguConfigRepoMock,
-			globalConfigRepo: globalConfigRepoMock,
+			doguRepo:                doguRepoMock,
+			globalConfigRepo:        globalConfigRepoMock,
+			doguConfigRepo:          doguConfigRepoMock,
+			sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock,
 		}
 
 		// when
@@ -864,11 +871,70 @@
 			},
 		}
 		doguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(doguConf, nil)
+		sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t)
+		sensitiveDoguConf := config.DoguConfig{
+			DoguName: postgresqlQualifiedName.SimpleName,
+			Config: config.Config{
+				LastUpdated: &timeMay,
+			},
+		}
+		sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(sensitiveDoguConf, nil)
 
 		useCase := &DoguInstallationUseCase{
-			doguRepo:         doguRepoMock,
-			doguConfigRepo:   doguConfigRepoMock,
-			globalConfigRepo: globalConfigRepoMock,
+			doguRepo:                doguRepoMock,
+			globalConfigRepo:        globalConfigRepoMock,
+			doguConfigRepo:          doguConfigRepoMock,
+			sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock,
+		}
+
+		// when
+		dogusNotUpToDate, err := useCase.CheckDogusUpToDate(testCtx)
+		// then
+		require.NoError(t, err)
+		assert.Len(t, dogusNotUpToDate, 1)
+		assert.Equal(t, dogusNotUpToDate[0], postgresqlQualifiedName.SimpleName)
+	})
+	t.Run("sensitive dogu config is not up to date", func(t *testing.T) {
+		// given
+		doguRepoMock := newMockDoguInstallationRepository(t)
+		doguRepoMock.EXPECT().GetAll(testCtx).Return(map[cescommons.SimpleName]*ecosystem.DoguInstallation{
+			"postgresql": {
+				Name:             postgresqlQualifiedName,
+				Version:          version3211,
+				InstalledVersion: version3211,
+				StartedAt:        timeJune,
+			},
+		}, nil)
+
+		globalConfigRepoMock := newMockGlobalConfigRepository(t)
+		globalConf := config.GlobalConfig{
+			Config: config.Config{
+				LastUpdated: &timeMay,
+			},
+		}
+		globalConfigRepoMock.EXPECT().Get(testCtx).Return(globalConf, nil)
+		doguConfigRepoMock := newMockDoguConfigRepository(t)
+		doguConf := config.DoguConfig{
+			DoguName: postgresqlQualifiedName.SimpleName,
+			Config: config.Config{
+				LastUpdated: &timeMay,
+			},
+		}
+		doguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(doguConf, nil)
+		sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t)
+		sensitiveDoguConf := config.DoguConfig{
+			DoguName: postgresqlQualifiedName.SimpleName,
+			Config: config.Config{
+				LastUpdated: &timeJuly,
+			},
+		}
+		sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(sensitiveDoguConf, nil)
+
+		useCase := &DoguInstallationUseCase{
+			doguRepo:                doguRepoMock,
+			globalConfigRepo:        globalConfigRepoMock,
+			doguConfigRepo:          doguConfigRepoMock,
+			sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock,
 		}
 
 		// when
@@ -910,6 +976,7 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) {
 		}
 		globalConfigRepoMock.EXPECT().Get(testCtx).Return(globalConf, nil)
 		doguConfigRepoMock := newMockDoguConfigRepository(t)
+		sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t)
 		ldapDoguConf := config.DoguConfig{
 			DoguName: postgresqlQualifiedName.SimpleName,
 			Config: config.Config{
@@ -924,11 +991,14 @@
 		}
 		doguConfigRepoMock.EXPECT().Get(testCtx, ldapQualifiedName.SimpleName).Return(ldapDoguConf, nil)
 		doguConfigRepoMock.EXPECT().Get(testCtx, casQualifiedName.SimpleName).Return(casDoguConf, nil)
+
sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, ldapQualifiedName.SimpleName).Return(ldapDoguConf, nil) + sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, casQualifiedName.SimpleName).Return(casDoguConf, nil) useCase := &DoguInstallationUseCase{ - doguRepo: doguRepoMock, - doguConfigRepo: doguConfigRepoMock, - globalConfigRepo: globalConfigRepoMock, + doguRepo: doguRepoMock, + globalConfigRepo: globalConfigRepoMock, + doguConfigRepo: doguConfigRepoMock, + sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock, } // when @@ -946,13 +1016,8 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) { doguRepoMock := newMockDoguInstallationRepository(t) doguRepoMock.EXPECT().GetAll(testCtx).Return(nil, assert.AnError) - globalConfigRepoMock := newMockGlobalConfigRepository(t) - doguConfigRepoMock := newMockDoguConfigRepository(t) - useCase := &DoguInstallationUseCase{ - doguRepo: doguRepoMock, - doguConfigRepo: doguConfigRepoMock, - globalConfigRepo: globalConfigRepoMock, + doguRepo: doguRepoMock, } // when @@ -968,11 +1033,9 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) { globalConfigRepoMock := newMockGlobalConfigRepository(t) globalConfigRepoMock.EXPECT().Get(testCtx).Return(config.GlobalConfig{}, assert.AnError) - doguConfigRepoMock := newMockDoguConfigRepository(t) useCase := &DoguInstallationUseCase{ doguRepo: doguRepoMock, - doguConfigRepo: doguConfigRepoMock, globalConfigRepo: globalConfigRepoMock, } @@ -1001,8 +1064,40 @@ func TestDoguInstallationUseCase_CheckDogusUpToDate(t *testing.T) { useCase := &DoguInstallationUseCase{ doguRepo: doguRepoMock, - doguConfigRepo: doguConfigRepoMock, globalConfigRepo: globalConfigRepoMock, + doguConfigRepo: doguConfigRepoMock, + } + + // when + dogusNotUpToDate, err := useCase.CheckDogusUpToDate(testCtx) + // then + require.Error(t, err) + require.Nil(t, dogusNotUpToDate) + }) + t.Run("error on sensitive dogu config Get error", func(t *testing.T) { + // given + doguRepoMock := newMockDoguInstallationRepository(t) + doguRepoMock.EXPECT().GetAll(testCtx).Return(map[cescommons.SimpleName]*ecosystem.DoguInstallation{ + "postgresql": { + Name: postgresqlQualifiedName, + Version: version3211, + InstalledVersion: version3211, + StartedAt: timeJuly, + }, + }, nil) + + globalConfigRepoMock := newMockGlobalConfigRepository(t) + globalConfigRepoMock.EXPECT().Get(testCtx).Return(config.GlobalConfig{}, nil) + doguConfigRepoMock := newMockDoguConfigRepository(t) + doguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(config.DoguConfig{}, nil) + sensitiveDoguConfigRepoMock := newMockSensitiveDoguConfigRepository(t) + sensitiveDoguConfigRepoMock.EXPECT().Get(testCtx, postgresqlQualifiedName.SimpleName).Return(config.DoguConfig{}, assert.AnError) + + useCase := &DoguInstallationUseCase{ + doguRepo: doguRepoMock, + globalConfigRepo: globalConfigRepoMock, + doguConfigRepo: doguConfigRepoMock, + sensitiveDoguConfigRepo: sensitiveDoguConfigRepoMock, } // when diff --git a/pkg/application/mock_blueprintSpecRepository_test.go b/pkg/application/mock_blueprintSpecRepository_test.go index 3abe7769..40d0ce4b 100644 --- a/pkg/application/mock_blueprintSpecRepository_test.go +++ b/pkg/application/mock_blueprintSpecRepository_test.go @@ -138,6 +138,64 @@ func (_c *mockBlueprintSpecRepository_GetById_Call) RunAndReturn(run func(contex return _c } +// ListIds provides a mock function with given fields: ctx +func (_m *mockBlueprintSpecRepository) ListIds(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) 
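+	// mockery-generated plumbing: ret holds the values configured via
+	// Return/RunAndReturn and is unpacked by type below.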
+ + if len(ret) == 0 { + panic("no return value specified for ListIds") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockBlueprintSpecRepository_ListIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListIds' +type mockBlueprintSpecRepository_ListIds_Call struct { + *mock.Call +} + +// ListIds is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockBlueprintSpecRepository_Expecter) ListIds(ctx interface{}) *mockBlueprintSpecRepository_ListIds_Call { + return &mockBlueprintSpecRepository_ListIds_Call{Call: _e.mock.On("ListIds", ctx)} +} + +func (_c *mockBlueprintSpecRepository_ListIds_Call) Run(run func(ctx context.Context)) *mockBlueprintSpecRepository_ListIds_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockBlueprintSpecRepository_ListIds_Call) Return(_a0 []string, _a1 error) *mockBlueprintSpecRepository_ListIds_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockBlueprintSpecRepository_ListIds_Call) RunAndReturn(run func(context.Context) ([]string, error)) *mockBlueprintSpecRepository_ListIds_Call { + _c.Call.Return(run) + return _c +} + // Update provides a mock function with given fields: ctx, blueprintSpec func (_m *mockBlueprintSpecRepository) Update(ctx context.Context, blueprintSpec *domain.BlueprintSpec) error { ret := _m.Called(ctx, blueprintSpec) diff --git a/pkg/bootstrap.go b/pkg/bootstrap.go index 052551b7..8acf9043 100644 --- a/pkg/bootstrap.go +++ b/pkg/bootstrap.go @@ -56,7 +56,7 @@ func Bootstrap(restConfig *rest.Config, eventRecorder record.EventRecorder, name sensitiveDoguConfigRepo := adapterconfigk8s.NewSensitiveDoguConfigRepository(*k8sSensitiveDoguConfigRepo) sensitiveConfigRefReader := sensitiveconfigref.NewSecretRefReader(ecosystemClientSet.CoreV1().Secrets(namespace)) k8sGlobalConfigRepo := repository.NewGlobalConfigRepository(ecosystemClientSet.CoreV1().ConfigMaps(namespace)) - globalConfigRepoAdapter := adapterconfigk8s.NewGlobalConfigRepository(*k8sGlobalConfigRepo) + globalConfigRepo := adapterconfigk8s.NewGlobalConfigRepository(*k8sGlobalConfigRepo) doguRepo := dogucr.NewDoguInstallationRepo(dogusInterface.Dogus(namespace)) @@ -65,12 +65,12 @@ func Bootstrap(restConfig *rest.Config, eventRecorder record.EventRecorder, name validateMountsUseCase := domainservice.NewValidateAdditionalMountsDomainUseCase(remoteDoguRegistry) blueprintValidationUseCase := application.NewBlueprintSpecValidationUseCase(blueprintRepo, validateDependenciesUseCase, validateMountsUseCase) effectiveBlueprintUseCase := application.NewEffectiveBlueprintUseCase(blueprintRepo) - stateDiffUseCase := application.NewStateDiffUseCase(blueprintRepo, doguRepo, globalConfigRepoAdapter, doguConfigRepo, sensitiveDoguConfigRepo, sensitiveConfigRefReader) - doguInstallationUseCase := application.NewDoguInstallationUseCase(blueprintRepo, doguRepo, doguConfigRepo, globalConfigRepoAdapter) + stateDiffUseCase := application.NewStateDiffUseCase(blueprintRepo, doguRepo, globalConfigRepo, doguConfigRepo, sensitiveDoguConfigRepo, sensitiveConfigRefReader) + doguInstallationUseCase 
:= application.NewDoguInstallationUseCase(blueprintRepo, doguRepo, globalConfigRepo, doguConfigRepo, sensitiveDoguConfigRepo) ecosystemHealthUseCase := application.NewEcosystemHealthUseCase(doguInstallationUseCase, blueprintRepo) completeBlueprintSpecUseCase := application.NewCompleteBlueprintUseCase(blueprintRepo) applyDogusUseCase := application.NewApplyDogusUseCase(blueprintRepo, doguInstallationUseCase) - ConfigUseCase := application.NewEcosystemConfigUseCase(blueprintRepo, doguConfigRepo, sensitiveDoguConfigRepo, globalConfigRepoAdapter, doguRepo) + ConfigUseCase := application.NewEcosystemConfigUseCase(blueprintRepo, doguConfigRepo, sensitiveDoguConfigRepo, globalConfigRepo, doguRepo) dogusUpToDateUseCase := application.NewDogusUpToDateUseCase(blueprintRepo, doguInstallationUseCase) preparationUseCases := application.NewBlueprintPreparationUseCase( @@ -88,7 +88,11 @@ func Bootstrap(restConfig *rest.Config, eventRecorder record.EventRecorder, name dogusUpToDateUseCase, ) blueprintChangeUseCase := application.NewBlueprintSpecChangeUseCase(blueprintRepo, preparationUseCases, applyUseCases) - blueprintReconciler := reconciler.NewBlueprintReconciler(blueprintChangeUseCase) + debounceWindow, err := config.GetDebounceWindow() + if err != nil { + return nil, err + } + blueprintReconciler := reconciler.NewBlueprintReconciler(blueprintChangeUseCase, blueprintRepo, namespace, debounceWindow) return &ApplicationContext{ Reconciler: blueprintReconciler, diff --git a/pkg/config/config.go b/pkg/config/config.go index f8a20f48..1d053514 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -6,6 +6,7 @@ import ( "os" "strconv" "strings" + "time" "github.com/Masterminds/semver/v3" ctrl "sigs.k8s.io/controller-runtime" @@ -14,11 +15,12 @@ import ( ) const ( - StageDevelopment = "development" - StageProduction = "production" - StageEnvVar = "STAGE" - namespaceEnvVar = "NAMESPACE" - logLevelEnvVar = "LOG_LEVEL" + StageDevelopment = "development" + StageProduction = "production" + StageEnvVar = "STAGE" + namespaceEnvVar = "NAMESPACE" + logLevelEnvVar = "LOG_LEVEL" + debounceWindowEnvVar = "DEBOUNCE_WINDOW" ) const ( @@ -33,7 +35,7 @@ const registryCacheDir = "/tmp/dogu-registry-cache" var log = ctrl.Log.WithName("config") var Stage = StageProduction -// OperatorConfig contains all configurable values for the dogu operator. +// OperatorConfig contains all configurable values for the blueprint operator. 
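+//
+// Not every setting lives in this struct: the reconcile debounce window, for
+// example, is read on demand via GetDebounceWindow (added below). Illustrative
+// sketch; DEBOUNCE_WINDOW must be parseable by time.ParseDuration:
+//
+//	window, err := config.GetDebounceWindow() // e.g. DEBOUNCE_WINDOW="10s"
+//	if err != nil {
+//		return err
+//	}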
 type OperatorConfig struct {
 	// Version contains the current version of the operator
 	Version *semver.Version
@@ -187,3 +189,16 @@ func GetRemoteCredentials() (*core.Credentials, error) {
 		Password: password,
 	}, nil
 }
+
+// GetDebounceWindow reads the debounce window for non-blueprint reconcile triggers
+// from the DEBOUNCE_WINDOW environment variable (a time.ParseDuration string, e.g. "10s").
+func GetDebounceWindow() (time.Duration, error) {
+	windowString, err := getEnvVar(debounceWindowEnvVar)
+	if err != nil {
+		return time.Duration(0), fmt.Errorf("failed to get env var [%s]: %w", debounceWindowEnvVar, err)
+	}
+	window, err := time.ParseDuration(windowString)
+	if err != nil {
+		return time.Duration(0), fmt.Errorf("failed to parse env var [%s] to duration: %w", debounceWindowEnvVar, err)
+	}
+	return window, nil
+}
diff --git a/pkg/domain/ecosystem/doguInstallation.go b/pkg/domain/ecosystem/doguInstallation.go
index df20554c..0a32cff8 100644
--- a/pkg/domain/ecosystem/doguInstallation.go
+++ b/pkg/domain/ecosystem/doguInstallation.go
@@ -120,8 +120,10 @@ func (dogu *DoguInstallation) IsVersionUpToDate() bool {
 	return dogu.Version.IsEqualTo(dogu.InstalledVersion)
 }
 
-func (dogu *DoguInstallation) IsConfigUpToDate(globalConfigUpdateTime *metav1.Time, doguConfigUpdateTime *metav1.Time) bool {
-	return !dogu.StartedAt.Before(globalConfigUpdateTime) && !dogu.StartedAt.Before(doguConfigUpdateTime)
+func (dogu *DoguInstallation) IsConfigUpToDate(globalConfigUpdateTime *metav1.Time, doguConfigUpdateTime *metav1.Time, sensitiveDoguConfigUpdateTime *metav1.Time) bool {
+	return !dogu.StartedAt.Before(globalConfigUpdateTime) &&
+		!dogu.StartedAt.Before(doguConfigUpdateTime) &&
+		!dogu.StartedAt.Before(sensitiveDoguConfigUpdateTime)
 }
 
 func (dogu *DoguInstallation) Upgrade(newVersion *core.Version) {
diff --git a/pkg/domain/ecosystem/doguInstallation_test.go b/pkg/domain/ecosystem/doguInstallation_test.go
index b52c17f0..ab9d8508 100644
--- a/pkg/domain/ecosystem/doguInstallation_test.go
+++ b/pkg/domain/ecosystem/doguInstallation_test.go
@@ -318,7 +318,7 @@ func TestDoguInstallation_IsConfigUpToDate(t *testing.T) {
 			dogu := &DoguInstallation{
 				StartedAt: tt.StartedAt,
 			}
-			assert.Equalf(t, tt.want, dogu.IsConfigUpToDate(tt.args.globalConfigUpdateTime, tt.args.doguConfigUpdateTime), "IsConfigUpToDate(%v, %v)", tt.args.globalConfigUpdateTime, tt.args.doguConfigUpdateTime)
+			assert.Equalf(t, tt.want, dogu.IsConfigUpToDate(tt.args.globalConfigUpdateTime, tt.args.doguConfigUpdateTime, nil), "IsConfigUpToDate(%v, %v, nil)", tt.args.globalConfigUpdateTime, tt.args.doguConfigUpdateTime)
 		})
 	}
 }
diff --git a/pkg/domainservice/adapterInterfaces.go b/pkg/domainservice/adapterInterfaces.go
index 0106e79c..b647ed57 100644
--- a/pkg/domainservice/adapterInterfaces.go
+++ b/pkg/domainservice/adapterInterfaces.go
@@ -50,6 +50,11 @@ type BlueprintSpecRepository interface {
 	// - returns an InternalError if there is any error, e.g. a connection error.
 	Count(ctx context.Context, limit int) (int, error)
 
+	// ListIds retrieves the IDs of all blueprints.
+	// - It returns the ID of every blueprint CR in the cluster, or
+	// - an InternalError if the operation fails.
+	ListIds(ctx context.Context) ([]string, error)
+
 	// Update updates a given BlueprintSpec.
// returns a ConflictError if there were changes on the BlueprintSpec in the meantime or // returns an InternalError if there is any other error diff --git a/pkg/domainservice/mock_BlueprintSpecRepository_test.go b/pkg/domainservice/mock_BlueprintSpecRepository_test.go index 3777c4ea..72dc0f76 100644 --- a/pkg/domainservice/mock_BlueprintSpecRepository_test.go +++ b/pkg/domainservice/mock_BlueprintSpecRepository_test.go @@ -138,6 +138,64 @@ func (_c *MockBlueprintSpecRepository_GetById_Call) RunAndReturn(run func(contex return _c } +// ListIds provides a mock function with given fields: ctx +func (_m *MockBlueprintSpecRepository) ListIds(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ListIds") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBlueprintSpecRepository_ListIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListIds' +type MockBlueprintSpecRepository_ListIds_Call struct { + *mock.Call +} + +// ListIds is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockBlueprintSpecRepository_Expecter) ListIds(ctx interface{}) *MockBlueprintSpecRepository_ListIds_Call { + return &MockBlueprintSpecRepository_ListIds_Call{Call: _e.mock.On("ListIds", ctx)} +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) Run(run func(ctx context.Context)) *MockBlueprintSpecRepository_ListIds_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) Return(_a0 []string, _a1 error) *MockBlueprintSpecRepository_ListIds_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBlueprintSpecRepository_ListIds_Call) RunAndReturn(run func(context.Context) ([]string, error)) *MockBlueprintSpecRepository_ListIds_Call { + _c.Call.Return(run) + return _c +} + // Update provides a mock function with given fields: ctx, blueprintSpec func (_m *MockBlueprintSpecRepository) Update(ctx context.Context, blueprintSpec *domain.BlueprintSpec) error { ret := _m.Called(ctx, blueprintSpec)