diff --git a/pkg/sync/doc.go b/pkg/sync/doc.go index f4f5d8725..dbc033653 100644 --- a/pkg/sync/doc.go +++ b/pkg/sync/doc.go @@ -59,10 +59,11 @@ Hooks can be deleted in an automatic fashion using the annotation: argocd.argopr argocd.argoproj.io/hook: PostSync argocd.argoproj.io/hook-delete-policy: HookSucceeded +Hook deletion policies are governed by sync success and failure. A successful sync operation requires all hooks to complete successfully. A sync will fail if _any_ hooks fail. The following policies define when the hook will be deleted. - - HookSucceeded - the hook resource is deleted after the hook succeeded (e.g. Job/Workflow completed successfully). - - HookFailed - the hook resource is deleted after the hook failed. + - HookSucceeded - the hook resource is deleted if the sync succeeds + - HookFailed - the hook resource is deleted if the sync fails. - BeforeHookCreation - any existing hook resource is deleted before the new one is created # Sync Waves diff --git a/pkg/sync/helper_test.go b/pkg/sync/helper_test.go new file mode 100644 index 000000000..f1938cf2a --- /dev/null +++ b/pkg/sync/helper_test.go @@ -0,0 +1,84 @@ +package sync + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/argoproj/gitops-engine/pkg/health" + synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" + "github.com/argoproj/gitops-engine/pkg/sync/hook" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing" +) + +type resourceNameHealthOverride map[string]health.HealthStatusCode + +func (r resourceNameHealthOverride) GetResourceHealth(obj *unstructured.Unstructured) (*health.HealthStatus, error) { + if status, ok := r[obj.GetName()]; ok { + return &health.HealthStatus{Status: status, Message: "test"}, nil + } + return nil, nil +} + +func getResourceResult(resources []synccommon.ResourceSyncResult, resourceKey kube.ResourceKey) *synccommon.ResourceSyncResult { + for _, res := range resources { + if res.ResourceKey == resourceKey { + return &res + } + } + return nil +} + +func newHook(name string, hookType synccommon.HookType, deletePolicy synccommon.HookDeletePolicy) *unstructured.Unstructured { + obj := testingutils.NewPod() + obj.SetName(name) + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + testingutils.Annotate(obj, synccommon.AnnotationKeyHook, string(hookType)) + testingutils.Annotate(obj, synccommon.AnnotationKeyHookDeletePolicy, string(deletePolicy)) + obj.SetFinalizers([]string{hook.HookFinalizer}) + return obj +} + +func withReplaceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionReplace}) + return un +} + +func withServerSideApplyAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionServerSideApply}) + return un +} + +func withDisableServerSideApplyAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionDisableServerSideApply}) + return un +} + +func withReplaceAndServerSideApplyAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Replace=true,ServerSideApply=true"}) + return un +} + 
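A minimal usage sketch of the newHook helper above, assuming only the constants that appear in this diff (synccommon.HookTypePostSync, synccommon.HookDeletePolicyHookSucceeded, the annotation keys) and a hypothetical hook name; it illustrates the HookSucceeded deletion policy described in the doc.go change rather than any code in this PR:

```go
package sync

import (
	"fmt"

	synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
)

// Example_newHook prints the annotations newHook sets; the hook name "db-migrate" is illustrative only.
func Example_newHook() {
	h := newHook("db-migrate", synccommon.HookTypePostSync, synccommon.HookDeletePolicyHookSucceeded)
	fmt.Println(h.GetAnnotations()[synccommon.AnnotationKeyHook])
	fmt.Println(h.GetAnnotations()[synccommon.AnnotationKeyHookDeletePolicy])
	// Output:
	// PostSync
	// HookSucceeded
}
```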
+func withForceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce}) + return un +} + +func withForceAndReplaceAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured { + un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Force=true,Replace=true"}) + return un +} + +func createNamespaceTask(namespace string) (*syncTask, error) { + nsSpec := &corev1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: namespace}} + unstructuredObj, err := kube.ToUnstructured(nsSpec) + + task := &syncTask{phase: synccommon.SyncPhasePreSync, targetObj: unstructuredObj} + if err != nil { + return task, fmt.Errorf("failed to convert namespace spec to unstructured: %w", err) + } + return task, nil +} diff --git a/pkg/sync/sync_context.go b/pkg/sync/sync_context.go index 8f4d51e4f..2ded02738 100644 --- a/pkg/sync/sync_context.go +++ b/pkg/sync/sync_context.go @@ -391,25 +391,43 @@ type syncContext struct { modificationResult map[kubeutil.ResourceKey]bool } -func (sc *syncContext) setRunningPhase(tasks []*syncTask, isPendingDeletion bool) { - if len(tasks) > 0 { - firstTask := tasks[0] - waitingFor := "completion of hook" - andMore := "hooks" - if !firstTask.isHook() { - waitingFor = "healthy state of" - andMore = "resources" - } - if isPendingDeletion { - waitingFor = "deletion of" - } - message := fmt.Sprintf("waiting for %s %s/%s/%s", - waitingFor, firstTask.group(), firstTask.kind(), firstTask.name()) - if moreTasks := len(tasks) - 1; moreTasks > 0 { - message = fmt.Sprintf("%s and %d more %s", message, moreTasks, andMore) +func (sc *syncContext) setRunningPhase(tasks syncTasks, isPendingDeletion bool) { + if tasks.Len() == 0 { + sc.setOperationPhase(common.OperationRunning, "") + return + } + + hooks, resources := tasks.Split(func(task *syncTask) bool { return task.isHook() }) + + waitingFor := "completion of hook" + andMore := "resources" + + if hooks.Len() == 0 { + waitingFor = "healthy state of" + } + if resources.Len() == 0 { + andMore = "hooks" + } + + if isPendingDeletion { + waitingFor = "deletion of" + if hooks.Len() != 0 { + waitingFor += " hook" } - sc.setOperationPhase(common.OperationRunning, message) } + + var firstTask *syncTask + if hooks.Len() != 0 { + firstTask = hooks[0] + } else { + firstTask = resources[0] + } + + message := fmt.Sprintf("waiting for %s %s/%s/%s", waitingFor, firstTask.group(), firstTask.kind(), firstTask.name()) + if moreTasks := len(tasks) - 1; moreTasks > 0 { + message = fmt.Sprintf("%s and %d more %s", message, moreTasks, andMore) + } + sc.setOperationPhase(common.OperationRunning, message) } // sync has performs the actual apply or hook based sync @@ -439,7 +457,7 @@ func (sc *syncContext) Sync() { // No need to perform a dry-run on the namespace creation, because if it fails we stop anyway sc.log.WithValues("task", nsCreateTask).Info("Creating namespace") if sc.runTasks(nsSyncTasks, false) == failed { - sc.setOperationFailed(syncTasks{}, nsSyncTasks, "the namespace failed to apply") + sc.executeSyncFailPhase(syncTasks{}, nsSyncTasks, "the namespace failed to apply") return } @@ -467,9 +485,9 @@ func (sc *syncContext) Sync() { // update the hook's result operationState, message, err := sc.getOperationPhase(task.liveObj) if err != nil { - sc.setResourceResult(task, "", common.OperationError, fmt.Sprintf("failed to get resource health: %v", err)) + 
sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("failed to get resource health: %v", err)) } else { - sc.setResourceResult(task, "", operationState, message) + sc.setResourceResult(task, task.syncStatus, operationState, message) } } else { // this must be calculated on the live object @@ -534,12 +552,13 @@ func (sc *syncContext) Sync() { // syncFailTasks only run during failure, so separate them from regular tasks syncFailTasks, tasks := tasks.Split(func(t *syncTask) bool { return t.phase == common.SyncPhaseSyncFail }) - syncFailedTasks, _ := tasks.Split(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) + syncFailedTasks := tasks.Filter(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) // if there are any completed but unsuccessful tasks, sync is a failure. + // we already know tasks do not contain running tasks if tasks.Any(func(t *syncTask) bool { return t.completed() && !t.successful() }) { sc.deleteHooks(hooksPendingDeletionFailed) - sc.setOperationFailed(syncFailTasks, syncFailedTasks, "one or more synchronization tasks completed unsuccessfully") + sc.executeSyncFailPhase(syncFailTasks, syncFailedTasks, "one or more synchronization tasks completed unsuccessfully") return } @@ -560,18 +579,12 @@ func (sc *syncContext) Sync() { return } - // remove any tasks not in this wave phase := tasks.phase() wave := tasks.wave() finalWave := phase == tasks.lastPhase() && wave == tasks.lastWave() - // if it is the last phase/wave and the only remaining tasks are non-hooks, the we are successful - // EVEN if those objects subsequently degraded - // This handles the common case where neither hooks or waves are used and a sync equates to simply an (asynchronous) kubectl apply of manifests, which succeeds immediately. - remainingTasks := tasks.Filter(func(t *syncTask) bool { return t.phase != phase || wave != t.wave() || t.isHook() }) - sc.log.WithValues("phase", phase, "wave", wave, "tasks", tasks, "syncFailTasks", syncFailTasks).V(1).Info("Filtering tasks in correct phase and wave") - tasks = tasks.Filter(func(t *syncTask) bool { return t.phase == phase && t.wave() == wave }) + tasks, remainingTasks := tasks.Split(func(t *syncTask) bool { return t.phase == phase && t.wave() == wave }) sc.setOperationPhase(common.OperationRunning, "one or more tasks are running") @@ -581,8 +594,10 @@ func (sc *syncContext) Sync() { if sc.syncWaveHook != nil && runState != failed { err := sc.syncWaveHook(phase, wave, finalWave) if err != nil { - sc.deleteHooks(hooksPendingDeletionFailed) - sc.setOperationPhase(common.OperationFailed, fmt.Sprintf("SyncWaveHook failed: %v", err)) + // Since this is an unexpected error and is not related to a specific task, terminate the sync with error + // without triggering the syncFailTasks + sc.terminateHooksPreemptively(tasks.Filter(func(task *syncTask) bool { return task.isHook() })) + sc.setOperationPhase(common.OperationError, fmt.Sprintf("SyncWaveHook failed: %v", err)) sc.log.Error(err, "SyncWaveHook failed") return } @@ -590,24 +605,79 @@ func (sc *syncContext) Sync() { switch runState { case failed: - syncFailedTasks, _ := tasks.Split(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) - sc.deleteHooks(hooksPendingDeletionFailed) - sc.setOperationFailed(syncFailTasks, syncFailedTasks, "one or more objects failed to apply") + // If we failed to apply at least one resource, we need to start the syncFailTasks and wait + // for the completion of any running hooks. 
In this case, the operation should be running. + syncFailedTasks := tasks.Filter(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) + runningHooks := tasks.Filter(func(t *syncTask) bool { return t.running() }) + if len(runningHooks) > 0 { + if len(syncFailTasks) > 0 { + completed := sc.executeSyncFailPhase(syncFailTasks, syncFailedTasks, "one or more objects failed to apply") + if !completed { + runningHooks = append(runningHooks, syncFailTasks...) + } + } + sc.setRunningPhase(runningHooks, false) + } else { + completed := sc.executeSyncFailPhase(syncFailTasks, syncFailedTasks, "one or more objects failed to apply") + if completed { + sc.deleteHooks(hooksPendingDeletionFailed) + } + } case successful: - if remainingTasks.Len() == 0 { + if remainingTasks.Len() == 0 && !tasks.Any(func(task *syncTask) bool { return task.isHook() }) { + // if it is the last phase/wave and the only running tasks are non-hooks, then we are successful + // EVEN if those objects subsequently degrade + // This handles the common case where neither hooks nor waves are used and a sync equates to simply + // an (asynchronous) kubectl apply of manifests, which succeeds immediately. + // delete all completed hooks which have appropriate delete policy sc.deleteHooks(hooksPendingDeletionSuccessful) sc.setOperationPhase(common.OperationSucceeded, "successfully synced (all tasks run)") } else { - sc.setRunningPhase(remainingTasks, false) + sc.setRunningPhase(tasks, false) } default: sc.setRunningPhase(tasks.Filter(func(task *syncTask) bool { - return task.deleteOnPhaseCompletion() + return task.deleteBeforeCreation() || (task.isPrune() && task.pending()) }), true) } } +// Terminate removes finalizers from completed hooks, then looks for any running jobs/workflow hooks and deletes their resources +func (sc *syncContext) Terminate() { + sc.log.V(1).Info("terminating") + tasks, _ := sc.getSyncTasks() + + // Remove completed hook finalizers + hooksCompleted := tasks.Filter(func(task *syncTask) bool { + return task.isHook() && task.completed() + }) + for _, task := range hooksCompleted { + if err := sc.removeHookFinalizer(task); err != nil { + sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("Failed to remove hook finalizer: %v", err)) + } + } + + // Terminate running hooks + terminateSuccessful := sc.terminateHooksPreemptively(tasks.Filter(func(task *syncTask) bool { return task.isHook() })) + if terminateSuccessful { + sc.setOperationPhase(common.OperationFailed, "Operation terminated") + } else { + sc.setOperationPhase(common.OperationError, "Operation termination had errors") + } +} + +func (sc *syncContext) GetState() (common.OperationPhase, string, []common.ResourceSyncResult) { + var resourceRes []common.ResourceSyncResult + for _, v := range sc.syncRes { + resourceRes = append(resourceRes, v) + } + sort.Slice(resourceRes, func(i, j int) bool { + return resourceRes[i].Order < resourceRes[j].Order + }) + return sc.phase, sc.message, resourceRes +} + // filter out out-of-sync tasks func (sc *syncContext) filterOutOfSyncTasks(tasks syncTasks) syncTasks { return tasks.Filter(func(t *syncTask) bool { @@ -635,6 +705,65 @@ func (sc *syncContext) getNamespaceCreationTask(tasks syncTasks) *syncTask { return nil } +// terminateHooksPreemptively terminates ongoing hook tasks +func (sc *syncContext) terminateHooksPreemptively(tasks syncTasks) bool { + terminateSuccessful := true + for _, task := range tasks { + if !task.isHook() || !task.running() { + continue + } + + if task.liveObj == nil { + // if we terminate preemptively
after the task was run, it will not contain the live object yet + liveObj, err := sc.getResource(task) + if err != nil && !apierrors.IsNotFound(err) { + sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("Failed to get live resource: %v", err)) + terminateSuccessful = false + continue + } + task.liveObj = liveObj + } + + if task.liveObj == nil { + sc.setResourceResult(task, task.syncStatus, common.OperationSucceeded, "Terminated") + continue + } + + // get the latest status of the running hook. Perhaps it already completed + phase, msg, statusErr := sc.getOperationPhase(task.liveObj) + if statusErr != nil { + sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("Failed to get hook health: %v", statusErr)) + } + + // Now that we have the latest status, we can remove the finalizer. + if err := sc.removeHookFinalizer(task); err != nil { + sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("Failed to remove hook finalizer: %v", err)) + terminateSuccessful = false + continue + } + + // delete the hook if it is running, if we don't know whether it is running, + // or if it has just completed and is meant to be deleted on sync failure + if statusErr != nil || phase.Running() || task.deleteOnPhaseFailed() { + err := sc.deleteResource(task) + if err != nil && !apierrors.IsNotFound(err) { + sc.setResourceResult(task, task.syncStatus, common.OperationFailed, fmt.Sprintf("Failed to delete: %v", err)) + terminateSuccessful = false + continue + } + } + + if phase.Completed() { + // If the hook has completed, we can update it to its real status + sc.setResourceResult(task, task.syncStatus, phase, msg) + } else if task.operationState != common.OperationError { + // update the status if the resource is not in error to preserve the error message + sc.setResourceResult(task, task.syncStatus, common.OperationFailed, "Terminated") + } + } + return terminateSuccessful +} + func (sc *syncContext) removeHookFinalizer(task *syncTask) error { if task.liveObj == nil { return nil @@ -663,11 +792,7 @@ func (sc *syncContext) removeHookFinalizer(task *syncTask) error { updateErr := sc.updateResource(task) if apierrors.IsConflict(updateErr) { sc.log.WithValues("task", task).V(1).Info("Retrying hook finalizer removal due to conflict on update") - resIf, err := sc.getResourceIf(task, "get") - if err != nil { - return fmt.Errorf("failed to get resource interface: %w", err) - } - liveObj, err := resIf.Get(context.TODO(), task.liveObj.GetName(), metav1.GetOptions{}) + liveObj, err := sc.getResource(task) if apierrors.IsNotFound(err) { sc.log.WithValues("task", task).V(1).Info("Resource is already deleted") return nil @@ -687,40 +812,16 @@ func (sc *syncContext) removeHookFinalizer(task *syncTask) error { }) } -func (sc *syncContext) updateResource(task *syncTask) error { - sc.log.WithValues("task", task).V(1).Info("Updating resource") - resIf, err := sc.getResourceIf(task, "update") - if err != nil { - return err - } - _, err = resIf.Update(context.TODO(), task.liveObj, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update resource: %w", err) - } - return nil -} - func (sc *syncContext) deleteHooks(hooksPendingDeletion syncTasks) { for _, task := range hooksPendingDeletion { err := sc.deleteResource(task) if err != nil && !apierrors.IsNotFound(err) { - sc.setResourceResult(task, "", common.OperationError, fmt.Sprintf("failed to delete resource: %v", err)) + sc.setResourceResult(task, task.syncStatus, common.OperationError, 
fmt.Sprintf("failed to delete resource: %v", err)) } } } -func (sc *syncContext) GetState() (common.OperationPhase, string, []common.ResourceSyncResult) { - var resourceRes []common.ResourceSyncResult - for _, v := range sc.syncRes { - resourceRes = append(resourceRes, v) - } - sort.Slice(resourceRes, func(i, j int) bool { - return resourceRes[i].Order < resourceRes[j].Order - }) - return sc.phase, sc.message, resourceRes -} - -func (sc *syncContext) setOperationFailed(syncFailTasks, syncFailedTasks syncTasks, message string) { +func (sc *syncContext) executeSyncFailPhase(syncFailTasks, syncFailedTasks syncTasks, message string) (completed bool) { errorMessageFactory := func(tasks syncTasks, message string) string { messages := tasks.Map(func(task *syncTask) string { return task.message @@ -733,23 +834,30 @@ func (sc *syncContext) setOperationFailed(syncFailTasks, syncFailedTasks syncTas errorMessage := errorMessageFactory(syncFailedTasks, message) - if len(syncFailTasks) > 0 { - // if all the failure hooks are completed, don't run them again, and mark the sync as failed - if syncFailTasks.All(func(task *syncTask) bool { return task.completed() }) { - sc.setOperationPhase(common.OperationFailed, errorMessage) - return - } - // otherwise, we need to start the failure hooks, and then return without setting - // the phase, so we make sure we have at least one more sync - sc.log.WithValues("syncFailTasks", syncFailTasks).V(1).Info("Running sync fail tasks") - if sc.runTasks(syncFailTasks, false) == failed { - failedSyncFailTasks := syncFailTasks.Filter(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) + // if there is no failure hook, there is nothing more to do and we can fail + if len(syncFailTasks) == 0 { + sc.setOperationPhase(common.OperationFailed, errorMessage) + return true + } + + // if all the failure hooks are completed, mark the sync as failed + if syncFailTasks.All(func(task *syncTask) bool { return task.completed() }) { + failedSyncFailTasks := syncFailTasks.Filter(func(t *syncTask) bool { return t.syncStatus == common.ResultCodeSyncFailed }) + if len(failedSyncFailTasks) > 0 { syncFailTasksMessage := errorMessageFactory(failedSyncFailTasks, "one or more SyncFail hooks failed") - sc.setOperationPhase(common.OperationFailed, fmt.Sprintf("%s\n%s", errorMessage, syncFailTasksMessage)) + errorMessage = fmt.Sprintf("%s\n%s", errorMessage, syncFailTasksMessage) } - } else { sc.setOperationPhase(common.OperationFailed, errorMessage) + return true } + + // otherwise, we need to start the pending failure hooks, and then return WITHOUT setting + // the phase to failed, since we want the failure hooks to complete their running state before failing + pendingSyncFailTasks := syncFailTasks.Filter(func(task *syncTask) bool { return !task.completed() && !task.running() }) + sc.log.WithValues("syncFailTasks", pendingSyncFailTasks).V(1).Info("Running sync fail tasks") + sc.runTasks(pendingSyncFailTasks, false) + sc.setRunningPhase(pendingSyncFailTasks, false) + return false } func (sc *syncContext) started() bool { @@ -1274,42 +1382,30 @@ func (sc *syncContext) hasCRDOfGroupKind(group string, kind string) bool { return false } -// terminate looks for any running jobs/workflow hooks and deletes the resource -func (sc *syncContext) Terminate() { - terminateSuccessful := true - sc.log.V(1).Info("terminating") - tasks, _ := sc.getSyncTasks() - for _, task := range tasks { - if !task.isHook() || task.liveObj == nil { - continue - } - if err := sc.removeHookFinalizer(task); 
err != nil { - sc.setResourceResult(task, task.syncStatus, common.OperationError, fmt.Sprintf("Failed to remove hook finalizer: %v", err)) - terminateSuccessful = false - continue - } - phase, msg, err := sc.getOperationPhase(task.liveObj) - if err != nil { - sc.setOperationPhase(common.OperationError, fmt.Sprintf("Failed to get hook health: %v", err)) - return - } - if phase == common.OperationRunning { - err := sc.deleteResource(task) - if err != nil && !apierrors.IsNotFound(err) { - sc.setResourceResult(task, "", common.OperationFailed, fmt.Sprintf("Failed to delete: %v", err)) - terminateSuccessful = false - } else { - sc.setResourceResult(task, "", common.OperationSucceeded, "Deleted") - } - } else { - sc.setResourceResult(task, "", phase, msg) - } +func (sc *syncContext) getResource(task *syncTask) (*unstructured.Unstructured, error) { + sc.log.WithValues("task", task).V(1).Info("Getting resource") + resIf, err := sc.getResourceIf(task, "get") + if err != nil { + return nil, err } - if terminateSuccessful { - sc.setOperationPhase(common.OperationFailed, "Operation terminated") - } else { - sc.setOperationPhase(common.OperationError, "Operation termination had errors") + liveObj, err := resIf.Get(context.TODO(), task.name(), metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get resource: %w", err) + } + return liveObj, nil +} + +func (sc *syncContext) updateResource(task *syncTask) error { + sc.log.WithValues("task", task).V(1).Info("Updating resource") + resIf, err := sc.getResourceIf(task, "update") + if err != nil { + return err + } + _, err = resIf.Update(context.TODO(), task.liveObj, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update resource: %w", err) } + return nil } func (sc *syncContext) deleteResource(task *syncTask) error { @@ -1357,16 +1453,32 @@ func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState { sc.log.WithValues("numTasks", len(tasks), "dryRun", dryRun).V(1).Info("Running tasks") state := successful - var createTasks syncTasks - var pruneTasks syncTasks + pruneTasks, createTasks := tasks.Split(func(task *syncTask) bool { return task.isPrune() }) - for _, task := range tasks { - if task.isPrune() { - pruneTasks = append(pruneTasks, task) - } else { - createTasks = append(createTasks, task) + // remove finalizers from previous sync on existing hooks to make sure the operation is idempotent + { + ss := newStateSync(state) + existingHooks := tasks.Filter(func(t *syncTask) bool { return t.isHook() && t.pending() && t.liveObj != nil }) + for _, task := range existingHooks { + t := task + ss.Go(func(state runState) runState { + logCtx := sc.log.WithValues("dryRun", dryRun, "task", t) + logCtx.V(1).Info("Removing finalizers") + if !dryRun { + if err := sc.removeHookFinalizer(t); err != nil { + state = failed + sc.setResourceResult(t, t.syncStatus, common.OperationError, fmt.Sprintf("failed to remove hook finalizer: %v", err)) + } + } + return state + }) } + state = ss.Wait() } + if state != successful { + return state + } + // prune first { if !sc.pruneConfirmed { @@ -1406,7 +1518,6 @@ func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState { } state = ss.Wait() } - if state != successful { return state } @@ -1418,15 +1529,19 @@ func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState { for _, task := range hooksPendingDeletion { t := task ss.Go(func(state runState) runState { - sc.log.WithValues("dryRun", dryRun, "task", t).V(1).Info("Deleting") + log := 
sc.log.WithValues("dryRun", dryRun, "task", t).V(1) + log.Info("Deleting") if !dryRun { err := sc.deleteResource(t) if err != nil { // it is possible to get a race condition here, such that the resource does not exist when - // delete is requested, we treat this as a nop + // delete is requested, we treat this as a nop and remove the liveObj if !apierrors.IsNotFound(err) { state = failed - sc.setResourceResult(t, "", common.OperationError, fmt.Sprintf("failed to delete resource: %v", err)) + sc.setResourceResult(t, t.syncStatus, common.OperationError, fmt.Sprintf("failed to delete resource: %v", err)) + } else { + log.Info("Resource not found, treating as no-op and removing liveObj") + t.liveObj = nil } } else { // if there is anything that needs deleting, we are at best now in pending and @@ -1439,7 +1554,6 @@ func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState { } state = ss.Wait() } - if state != successful { return state } diff --git a/pkg/sync/sync_context_test.go b/pkg/sync/sync_context_test.go index 0e8d01ebb..64712d798 100644 --- a/pkg/sync/sync_context_test.go +++ b/pkg/sync/sync_context_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "net/http" "net/http/httptest" "reflect" @@ -41,28 +40,12 @@ import ( var standardVerbs = metav1.Verbs{"create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"} func newTestSyncCtx(getResourceFunc *func(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error), opts ...SyncOpt) *syncContext { - fakeDisco := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}} - fakeDisco.Resources = append(make([]*metav1.APIResourceList, 0), - &metav1.APIResourceList{ - GroupVersion: "v1", - APIResources: []metav1.APIResource{ - {Kind: "Pod", Group: "", Version: "v1", Namespaced: true, Verbs: standardVerbs}, - {Kind: "Service", Group: "", Version: "v1", Namespaced: true, Verbs: standardVerbs}, - {Kind: "Namespace", Group: "", Version: "v1", Namespaced: false, Verbs: standardVerbs}, - }, - }, - &metav1.APIResourceList{ - GroupVersion: "apps/v1", - APIResources: []metav1.APIResource{ - {Kind: "Deployment", Group: "apps", Version: "v1", Namespaced: true, Verbs: standardVerbs}, - }, - }) sc := syncContext{ config: &rest.Config{}, rawConfig: &rest.Config{}, namespace: testingutils.FakeArgoCDNamespace, revision: "FooBarBaz", - disco: fakeDisco, + disco: &fakedisco.FakeDiscovery{Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources}}, log: textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app"), resources: map[kube.ResourceKey]reconciledResource{}, syncRes: map[string]synccommon.ResourceSyncResult{}, @@ -145,6 +128,7 @@ func TestSyncNamespaceCreatedBeforeDryRunWithFailure(t *testing.T) { return true, nil }), func(ctx *syncContext) { resourceOps := ctx.resourceOps.(*kubetest.MockResourceOps) + resourceOps.ExecuteForDryRun = true resourceOps.Commands = map[string]kubetest.KubectlOutput{} resourceOps.Commands[pod.GetName()] = kubetest.KubectlOutput{ Output: "should not be returned", @@ -297,19 +281,27 @@ func TestSyncCustomResources(t *testing.T) { } func TestSyncSuccessfully(t *testing.T) { - syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, true, false, false)) + newSvc := testingutils.NewService() pod := testingutils.NewPod() pod.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, true, false, false), + 
WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + newSvc.GetName(): health.HealthStatusDegraded, // Always degraded + }))) + syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil, pod}, - Target: []*unstructured.Unstructured{testingutils.NewService(), nil}, + Target: []*unstructured.Unstructured{newSvc, nil}, }) + // Since we only have one step, we consider the sync successful if the manifests were applied correctly. + // In this case, we do not need to run the sync again to evaluate the health syncCtx.Sync() phase, _, resources := syncCtx.GetState() - assert.Equal(t, synccommon.OperationSucceeded, phase) assert.Len(t, resources, 2) + for i := range resources { result := resources[i] switch result.ResourceKey.Kind { @@ -325,6 +317,48 @@ } } +func TestSyncSuccessfully_Multistep(t *testing.T) { + newSvc := testingutils.NewService() + newSvc.SetNamespace(testingutils.FakeArgoCDNamespace) + testingutils.Annotate(newSvc, synccommon.AnnotationSyncWave, "0") + + newSvc2 := testingutils.NewService() + newSvc2.SetNamespace(testingutils.FakeArgoCDNamespace) + newSvc2.SetName("new-svc-2") + testingutils.Annotate(newSvc2, synccommon.AnnotationSyncWave, "5") + + syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, true, false, false), + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + newSvc.GetName(): health.HealthStatusHealthy, + }))) + + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{nil, nil}, + Target: []*unstructured.Unstructured{newSvc, newSvc2}, + }) + + // Since we have multiple steps, we need to run the sync again to evaluate the health of the current phase + // (wave 0) and start the new phase (wave 5). 
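+ // Waves within a phase run in ascending order: the wave-0 Service must be applied and report
+ // a healthy state before the wave-5 Service is attempted, which is why this test calls Sync()
+ // twice and checks the intermediate running message in between.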
+ syncCtx.Sync() + phase, message, resources := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for healthy state of /Service/my-service", message) + assert.Len(t, resources, 1) + assert.Equal(t, kube.GetResourceKey(newSvc), resources[0].ResourceKey) + + // Update the live resources for the next sync + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{newSvc, nil}, + Target: []*unstructured.Unstructured{newSvc, newSvc2}, + }) + + syncCtx.Sync() + phase, message, resources = syncCtx.GetState() + assert.Equal(t, synccommon.OperationSucceeded, phase) + assert.Equal(t, "successfully synced (all tasks run)", message) + assert.Len(t, resources, 2) +} + func TestSyncDeleteSuccessfully(t *testing.T) { syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, true, false, false)) svc := testingutils.NewService() @@ -800,11 +834,6 @@ func TestSyncOptionValidate(t *testing.T) { } } -func withReplaceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionReplace}) - return un -} - func TestSync_Replace(t *testing.T) { testCases := []struct { name string @@ -839,19 +868,37 @@ } } -func withServerSideApplyAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionServerSideApply}) - return un -} +func TestSync_HookWithReplaceAndBeforeHookCreation_AlreadyDeleted(t *testing.T) { + // This tests a race condition where Delete is called on an already deleted object: + // liveObj is set, but then the resource is deleted asynchronously in Kubernetes + syncCtx := newTestSyncCtx(nil) -func withDisableServerSideApplyAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionDisableServerSideApply}) - return un -} + target := withReplaceAnnotation(testingutils.NewPod()) + target.SetNamespace(testingutils.FakeArgoCDNamespace) + target = testingutils.Annotate(target, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyBeforeHookCreation)) + target = testingutils.Annotate(target, synccommon.AnnotationKeyHook, string(synccommon.SyncPhasePreSync)) + live := target.DeepCopy() -func withReplaceAndServerSideApplyAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Replace=true,ServerSideApply=true"}) - return un + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{live}, + Target: []*unstructured.Unstructured{target}, + }) + syncCtx.hooks = []*unstructured.Unstructured{live} + + client := fake.NewSimpleDynamicClient(runtime.NewScheme()) + deleted := false + client.PrependReactor("delete", "pods", func(_ testcore.Action) (bool, runtime.Object, error) { + deleted = true + // simulate the race condition where liveObj was not nil, but is now deleted in k8s + return true, nil, apierrors.NewNotFound(corev1.Resource("pods"), live.GetName()) + }) + syncCtx.dynamicIf = client + + syncCtx.Sync() + + resourceOps, _ := syncCtx.resourceOps.(*kubetest.MockResourceOps) + assert.Equal(t, "create", resourceOps.GetLastResourceCommand(kube.GetResourceKey(target))) + assert.True(t, deleted) } func TestSync_ServerSideApply(t *testing.T) { @@ 
-926,16 +973,6 @@ func TestSyncContext_ServerSideApplyWithDryRun(t *testing.T) { } } -func withForceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce}) - return un -} - -func withForceAndReplaceAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured { - un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Force=true,Replace=true"}) - return un -} - func TestSync_Force(t *testing.T) { testCases := []struct { name string @@ -1063,6 +1100,7 @@ func TestDeDupingTasks(t *testing.T) { func TestObjectsGetANamespace(t *testing.T) { syncCtx := newTestSyncCtx(nil) pod := testingutils.NewPod() + pod.SetNamespace("") syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil}, Target: []*unstructured.Unstructured{pod}, @@ -1233,305 +1271,422 @@ func TestNamespaceAutoCreationForNonExistingNs(t *testing.T) { }) } -func createNamespaceTask(namespace string) (*syncTask, error) { - nsSpec := &corev1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: namespace}} - unstructuredObj, err := kube.ToUnstructured(nsSpec) - - task := &syncTask{phase: synccommon.SyncPhasePreSync, targetObj: unstructuredObj} - if err != nil { - return task, fmt.Errorf("failed to convert namespace spec to unstructured: %w", err) - } - return task, nil -} +func TestSync_SuccessfulSyncWithSyncFailHook(t *testing.T) { + hook := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + pod := testingutils.NewPod() + syncFailHook := newHook("sync-fail-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyHookSucceeded) -func TestSyncFailureHookWithSuccessfulSync(t *testing.T) { - syncCtx := newTestSyncCtx(nil) + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + pod.GetName(): health.HealthStatusHealthy, + hook.GetName(): health.HealthStatusHealthy, + }))) syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil}, - Target: []*unstructured.Unstructured{testingutils.NewPod()}, + Target: []*unstructured.Unstructured{pod}, }) - syncCtx.hooks = []*unstructured.Unstructured{newHook(synccommon.HookTypeSyncFail)} + syncCtx.hooks = []*unstructured.Unstructured{hook, syncFailHook} + syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme()) + // First sync does dry-run and starts hooks syncCtx.Sync() - phase, _, resources := syncCtx.GetState() + phase, message, resources := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for completion of hook /Pod/hook-1 and 1 more resources", message) + assert.Len(t, resources, 2) + + // Update the live resources for the next sync + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{pod, hook}, + Target: []*unstructured.Unstructured{pod, nil}, + }) + + // Second sync completes hooks + syncCtx.Sync() + phase, message, resources = syncCtx.GetState() assert.Equal(t, synccommon.OperationSucceeded, phase) - // only one result, we did not run the failure failureHook - assert.Len(t, resources, 1) + assert.Equal(t, "successfully synced (no more tasks)", message) + assert.Len(t, resources, 2) } -func TestSyncFailureHookWithFailedSync(t *testing.T) { - syncCtx := newTestSyncCtx(nil) +func 
TestSync_FailedSyncWithSyncFailHook_HookFailed(t *testing.T) { + hook := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) pod := testingutils.NewPod() + syncFailHook := newHook("sync-fail-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyHookSucceeded) + + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + pod.GetName(): health.HealthStatusHealthy, + hook.GetName(): health.HealthStatusDegraded, // Hook failure + syncFailHook.GetName(): health.HealthStatusHealthy, + }))) syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil}, Target: []*unstructured.Unstructured{pod}, }) - syncCtx.hooks = []*unstructured.Unstructured{newHook(synccommon.HookTypeSyncFail)} - mockKubectl := &kubetest.MockKubectlCmd{ - Commands: map[string]kubetest.KubectlOutput{pod.GetName(): {Err: errors.New("")}}, - } - syncCtx.kubectl = mockKubectl - mockResourceOps := kubetest.MockResourceOps{ - Commands: map[string]kubetest.KubectlOutput{pod.GetName(): {Err: errors.New("")}}, - } - syncCtx.resourceOps = &mockResourceOps + syncCtx.hooks = []*unstructured.Unstructured{hook, syncFailHook} + syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme()) + // First sync does dry-run and starts hooks syncCtx.Sync() - syncCtx.Sync() - - phase, _, resources := syncCtx.GetState() - assert.Equal(t, synccommon.OperationFailed, phase) + phase, message, resources := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for completion of hook /Pod/hook-1 and 1 more resources", message) assert.Len(t, resources, 2) -} -func TestBeforeHookCreation(t *testing.T) { - syncCtx := newTestSyncCtx(nil) - hook := testingutils.Annotate(testingutils.Annotate(testingutils.NewPod(), synccommon.AnnotationKeyHook, "Sync"), synccommon.AnnotationKeyHookDeletePolicy, "BeforeHookCreation") - hook.SetNamespace(testingutils.FakeArgoCDNamespace) + // Update the live resources for the next sync syncCtx.resources = groupResources(ReconciliationResult{ - Live: []*unstructured.Unstructured{hook}, - Target: []*unstructured.Unstructured{nil}, + Live: []*unstructured.Unstructured{pod, hook}, + Target: []*unstructured.Unstructured{pod, nil}, }) - syncCtx.hooks = []*unstructured.Unstructured{hook} - syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme()) + // Second sync fails the sync and starts the SyncFail hooks syncCtx.Sync() + phase, message, resources = syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for completion of hook /Pod/sync-fail-hook", message) + assert.Len(t, resources, 3) - _, _, resources := syncCtx.GetState() - assert.Len(t, resources, 1) - assert.Empty(t, resources[0].Message) - assert.Equal(t, "waiting for completion of hook /Pod/my-pod", syncCtx.message) + // Update the live resources for the next sync + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{pod, hook, syncFailHook}, + Target: []*unstructured.Unstructured{pod, nil, nil}, + }) + + // Third sync completes the SyncFail hooks + syncCtx.Sync() + phase, message, resources = syncCtx.GetState() + assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, "one or more synchronization tasks completed unsuccessfully", message) + assert.Len(t, resources, 3) } -func TestRunSyncFailHooksFailed(t *testing.T) { +func TestSync_FailedSyncWithSyncFailHook_ApplyFailed(t 
*testing.T) { // Tests that other SyncFail Hooks run even if one of them fail. - - syncCtx := newTestSyncCtx(nil) pod := testingutils.NewPod() - successfulSyncFailHook := newHook(synccommon.HookTypeSyncFail) - successfulSyncFailHook.SetName("successful-sync-fail-hook") - failedSyncFailHook := newHook(synccommon.HookTypeSyncFail) - failedSyncFailHook.SetName("failed-sync-fail-hook") + pod.SetNamespace(testingutils.FakeArgoCDNamespace) + successfulSyncFailHook := newHook("successful-sync-fail-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyBeforeHookCreation) + failedSyncFailHook := newHook("failed-sync-fail-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyBeforeHookCreation) + + syncCtx := newTestSyncCtx(nil, WithHealthOverride(resourceNameHealthOverride{ + successfulSyncFailHook.GetName(): health.HealthStatusHealthy, + })) syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil}, Target: []*unstructured.Unstructured{pod}, }) syncCtx.hooks = []*unstructured.Unstructured{successfulSyncFailHook, failedSyncFailHook} + syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme()) - mockKubectl := &kubetest.MockKubectlCmd{ - Commands: map[string]kubetest.KubectlOutput{ - // Fail operation - pod.GetName(): {Err: errors.New("")}, - // Fail a single SyncFail hook - failedSyncFailHook.GetName(): {Err: errors.New("")}, - }, - } - syncCtx.kubectl = mockKubectl mockResourceOps := kubetest.MockResourceOps{ Commands: map[string]kubetest.KubectlOutput{ // Fail operation - pod.GetName(): {Err: errors.New("")}, + pod.GetName(): {Err: errors.New("fake pod failure")}, // Fail a single SyncFail hook - failedSyncFailHook.GetName(): {Err: errors.New("")}, + failedSyncFailHook.GetName(): {Err: errors.New("fake hook failure")}, }, } syncCtx.resourceOps = &mockResourceOps + // First sync triggers the SyncFail hooks on failure syncCtx.Sync() - syncCtx.Sync() - phase, _, resources := syncCtx.GetState() + phase, message, resources := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for completion of hook /Pod/failed-sync-fail-hook and 1 more hooks", message) + assert.Len(t, resources, 3) + podResult := getResourceResult(resources, kube.GetResourceKey(pod)) + require.NotNil(t, podResult, "%s not found", kube.GetResourceKey(pod)) + assert.Equal(t, synccommon.OperationFailed, podResult.HookPhase) + assert.Equal(t, synccommon.ResultCodeSyncFailed, podResult.Status) + + failedSyncFailHookResult := getResourceResult(resources, kube.GetResourceKey(failedSyncFailHook)) + require.NotNil(t, failedSyncFailHookResult, "%s not found", kube.GetResourceKey(failedSyncFailHook)) + assert.Equal(t, synccommon.OperationFailed, failedSyncFailHookResult.HookPhase) + assert.Equal(t, synccommon.ResultCodeSyncFailed, failedSyncFailHookResult.Status) + + successfulSyncFailHookResult := getResourceResult(resources, kube.GetResourceKey(successfulSyncFailHook)) + require.NotNil(t, successfulSyncFailHookResult, "%s not found", kube.GetResourceKey(successfulSyncFailHook)) + assert.Equal(t, synccommon.OperationRunning, successfulSyncFailHookResult.HookPhase) + assert.Equal(t, synccommon.ResultCodeSynced, successfulSyncFailHookResult.Status) + + // Update the live state for the next run + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{nil, successfulSyncFailHook}, + Target: []*unstructured.Unstructured{pod, nil}, + }) - // Operation as a whole should fail + // Second sync 
completes when the SyncFail hooks are done + syncCtx.Sync() + phase, message, resources = syncCtx.GetState() assert.Equal(t, synccommon.OperationFailed, phase) - // failedSyncFailHook should fail - assert.Equal(t, synccommon.OperationFailed, resources[1].HookPhase) - assert.Equal(t, synccommon.ResultCodeSyncFailed, resources[1].Status) - // successfulSyncFailHook should be synced running (it is an nginx pod) - assert.Equal(t, synccommon.OperationRunning, resources[2].HookPhase) - assert.Equal(t, synccommon.ResultCodeSynced, resources[2].Status) + assert.Equal(t, "one or more synchronization tasks completed unsuccessfully, reason: fake pod failure\none or more SyncFail hooks failed, reason: fake hook failure", message) + assert.Len(t, resources, 3) + + successfulSyncFailHookResult = getResourceResult(resources, kube.GetResourceKey(successfulSyncFailHook)) + require.NotNil(t, successfulSyncFailHookResult, "%s not found", kube.GetResourceKey(successfulSyncFailHook)) + assert.Equal(t, synccommon.OperationSucceeded, successfulSyncFailHookResult.HookPhase) + assert.Equal(t, synccommon.ResultCodeSynced, successfulSyncFailHookResult.Status) } -type resourceNameHealthOverride map[string]health.HealthStatusCode +func TestBeforeHookCreation(t *testing.T) { + finalizerRemoved := false + syncCtx := newTestSyncCtx(nil) + hookObj := testingutils.Annotate(testingutils.Annotate(testingutils.NewPod(), synccommon.AnnotationKeyHook, "Sync"), synccommon.AnnotationKeyHookDeletePolicy, "BeforeHookCreation") + hookObj.SetFinalizers([]string{hook.HookFinalizer}) + hookObj.SetNamespace(testingutils.FakeArgoCDNamespace) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{hookObj}, + Target: []*unstructured.Unstructured{nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hookObj} + client := fake.NewSimpleDynamicClient(runtime.NewScheme(), hookObj) + client.PrependReactor("update", "pods", func(_ testcore.Action) (bool, runtime.Object, error) { + finalizerRemoved = true + return false, nil, nil + }) + syncCtx.dynamicIf = client -func (r resourceNameHealthOverride) GetResourceHealth(obj *unstructured.Unstructured) (*health.HealthStatus, error) { - if status, ok := r[obj.GetName()]; ok { - return &health.HealthStatus{Status: status, Message: "test"}, nil - } - return nil, nil + // First sync will delete the existing hook + syncCtx.Sync() + phase, _, _ := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.True(t, finalizerRemoved) + + // Second sync will create the hook + syncCtx.Sync() + phase, message, resources := syncCtx.GetState() + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Len(t, resources, 1) + assert.Equal(t, synccommon.OperationRunning, resources[0].HookPhase) + assert.Equal(t, "waiting for completion of hook /Pod/my-pod", message) } -func TestRunSync_HooksNotDeletedIfPhaseNotCompleted(t *testing.T) { - hook1 := newHook(synccommon.HookTypePreSync) - hook1.SetName("completed-hook") - hook1.SetNamespace(testingutils.FakeArgoCDNamespace) - _ = testingutils.Annotate(hook1, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookSucceeded)) - completedHook := hook1.DeepCopy() - completedHook.SetFinalizers(append(completedHook.GetFinalizers(), hook.HookFinalizer)) +func TestSync_ExistingHooksWithFinalizer(t *testing.T) { + hook1 := newHook("existing-hook-1", synccommon.HookTypePreSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("existing-hook-2", 
synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("existing-hook-3", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookSucceeded) - hook2 := newHook(synccommon.HookTypePreSync) - hook2.SetNamespace(testingutils.FakeArgoCDNamespace) - hook2.SetName("in-progress-hook") - _ = testingutils.Annotate(hook2, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookSucceeded)) - inProgressHook := hook2.DeepCopy() - inProgressHook.SetFinalizers(append(inProgressHook.GetFinalizers(), hook.HookFinalizer)) + syncCtx := newTestSyncCtx(nil) + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) + syncCtx.dynamicIf = fakeDynamicClient + updatedCount := 0 + fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + // Removing the finalizers + updatedCount++ + return false, nil, nil + }) + deletedCount := 0 + fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + // because of HookDeletePolicyBeforeHookCreation + deletedCount++ + return false, nil, nil + }) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{nil, nil, nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} + + syncCtx.Sync() + phase, message, _ := syncCtx.GetState() + + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for deletion of hook /Pod/existing-hook-1", message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 1, deletedCount) + + _, err := syncCtx.getResource(&syncTask{liveObj: hook1}) + require.Error(t, err, "Expected resource to be deleted") + assert.True(t, apierrors.IsNotFound(err)) +} + +func TestSync_HooksNotDeletedIfPhaseNotCompleted(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypePreSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookSucceeded) syncCtx := newTestSyncCtx(nil, WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ - inProgressHook.GetName(): health.HealthStatusProgressing, + hook1.GetName(): health.HealthStatusProgressing, // At least one is running + hook2.GetName(): health.HealthStatusHealthy, + hook3.GetName(): health.HealthStatusHealthy, })), WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ - ResourceKey: kube.GetResourceKey(completedHook), - HookPhase: synccommon.OperationSucceeded, + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, SyncPhase: synccommon.SyncPhasePreSync, + Order: 0, }, { - ResourceKey: kube.GetResourceKey(inProgressHook), + ResourceKey: kube.GetResourceKey(hook2), HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, SyncPhase: synccommon.SyncPhasePreSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhasePreSync, + Order: 2, }}, metav1.Now(), )) - fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) 
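+ // The reactors below return handled=false, so any update or delete still falls through to the
+ // fake client's default object tracker; the counters only record whether the sync attempted
+ // those calls at all.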
syncCtx.dynamicIf = fakeDynamicClient updatedCount := 0 fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { // Removing the finalizers updatedCount++ - return true, nil, nil + return false, nil, nil }) deletedCount := 0 fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { deletedCount++ - return true, nil, nil + return false, nil, nil }) syncCtx.resources = groupResources(ReconciliationResult{ - Live: []*unstructured.Unstructured{completedHook, inProgressHook}, - Target: []*unstructured.Unstructured{nil, nil}, + Live: []*unstructured.Unstructured{hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{nil, nil, nil}, }) - syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2} - - syncCtx.kubectl = &kubetest.MockKubectlCmd{ - Commands: map[string]kubetest.KubectlOutput{}, - } + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} syncCtx.Sync() + phase, message, _ := syncCtx.GetState() - assert.Equal(t, synccommon.OperationRunning, syncCtx.phase) + assert.Equal(t, synccommon.OperationRunning, phase) + assert.Equal(t, "waiting for completion of hook /Pod/hook-1", message) assert.Equal(t, 0, updatedCount) assert.Equal(t, 0, deletedCount) } -func TestRunSync_HooksDeletedAfterPhaseCompleted(t *testing.T) { - hook1 := newHook(synccommon.HookTypePreSync) - hook1.SetName("completed-hook1") - hook1.SetNamespace(testingutils.FakeArgoCDNamespace) - _ = testingutils.Annotate(hook1, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookSucceeded)) - completedHook1 := hook1.DeepCopy() - completedHook1.SetFinalizers(append(completedHook1.GetFinalizers(), hook.HookFinalizer)) - - hook2 := newHook(synccommon.HookTypePreSync) - hook2.SetNamespace(testingutils.FakeArgoCDNamespace) - hook2.SetName("completed-hook2") - _ = testingutils.Annotate(hook2, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookSucceeded)) - completedHook2 := hook2.DeepCopy() - completedHook2.SetFinalizers(append(completedHook1.GetFinalizers(), hook.HookFinalizer)) +func TestSync_HooksDeletedAfterSyncSucceeded(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypePreSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookSucceeded) syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusHealthy, + hook2.GetName(): health.HealthStatusHealthy, + hook3.GetName(): health.HealthStatusHealthy, + })), WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ - ResourceKey: kube.GetResourceKey(completedHook1), - HookPhase: synccommon.OperationSucceeded, + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, SyncPhase: synccommon.SyncPhasePreSync, + Order: 0, }, { - ResourceKey: kube.GetResourceKey(completedHook2), - HookPhase: synccommon.OperationSucceeded, + ResourceKey: kube.GetResourceKey(hook2), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhasePreSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, 
SyncPhase: synccommon.SyncPhasePreSync, + Order: 2, }}, metav1.Now(), )) - fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) syncCtx.dynamicIf = fakeDynamicClient updatedCount := 0 fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { // Removing the finalizers updatedCount++ - return true, nil, nil + return false, nil, nil }) deletedCount := 0 fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { deletedCount++ - return true, nil, nil + return false, nil, nil }) syncCtx.resources = groupResources(ReconciliationResult{ - Live: []*unstructured.Unstructured{completedHook1, completedHook2}, - Target: []*unstructured.Unstructured{nil, nil}, + Live: []*unstructured.Unstructured{hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{nil, nil, nil}, }) - syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2} - - syncCtx.kubectl = &kubetest.MockKubectlCmd{ - Commands: map[string]kubetest.KubectlOutput{}, - } + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} syncCtx.Sync() + phase, message, _ := syncCtx.GetState() - assert.Equal(t, synccommon.OperationSucceeded, syncCtx.phase) - assert.Equal(t, 2, updatedCount) - assert.Equal(t, 2, deletedCount) -} + assert.Equal(t, synccommon.OperationSucceeded, phase) + assert.Equal(t, "successfully synced (no more tasks)", message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 1, deletedCount) -func TestRunSync_HooksDeletedAfterPhaseCompletedFailed(t *testing.T) { - hook1 := newHook(synccommon.HookTypeSync) - hook1.SetName("completed-hook1") - hook1.SetNamespace(testingutils.FakeArgoCDNamespace) - _ = testingutils.Annotate(hook1, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookFailed)) - completedHook1 := hook1.DeepCopy() - completedHook1.SetFinalizers(append(completedHook1.GetFinalizers(), hook.HookFinalizer)) + _, err := syncCtx.getResource(&syncTask{liveObj: hook3}) + require.Error(t, err, "Expected resource to be deleted") + assert.True(t, apierrors.IsNotFound(err)) +} - hook2 := newHook(synccommon.HookTypeSync) - hook2.SetNamespace(testingutils.FakeArgoCDNamespace) - hook2.SetName("completed-hook2") - _ = testingutils.Annotate(hook2, synccommon.AnnotationKeyHookDeletePolicy, string(synccommon.HookDeletePolicyHookFailed)) - completedHook2 := hook2.DeepCopy() - completedHook2.SetFinalizers(append(completedHook1.GetFinalizers(), hook.HookFinalizer)) +func TestSync_HooksDeletedAfterSyncFailed(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypePreSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypePreSync, synccommon.HookDeletePolicyHookSucceeded) syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusDegraded, // At least one has failed + hook2.GetName(): health.HealthStatusHealthy, + hook3.GetName(): health.HealthStatusHealthy, + })), WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ - ResourceKey: kube.GetResourceKey(completedHook1), - HookPhase: synccommon.OperationSucceeded, - SyncPhase: synccommon.SyncPhaseSync, + ResourceKey: kube.GetResourceKey(hook1), + 
HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhasePreSync, + Order: 0, }, { - ResourceKey: kube.GetResourceKey(completedHook2), - HookPhase: synccommon.OperationFailed, - SyncPhase: synccommon.SyncPhaseSync, + ResourceKey: kube.GetResourceKey(hook2), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhasePreSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhasePreSync, + Order: 2, }}, metav1.Now(), )) - fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) syncCtx.dynamicIf = fakeDynamicClient updatedCount := 0 fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { // Removing the finalizers updatedCount++ - return true, nil, nil + return false, nil, nil }) deletedCount := 0 fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { deletedCount++ - return true, nil, nil + return false, nil, nil }) + syncCtx.resources = groupResources(ReconciliationResult{ - Live: []*unstructured.Unstructured{completedHook1, completedHook2}, - Target: []*unstructured.Unstructured{nil, nil}, + Live: []*unstructured.Unstructured{hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{nil, nil, nil}, }) - syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2} - - syncCtx.kubectl = &kubetest.MockKubectlCmd{ - Commands: map[string]kubetest.KubectlOutput{}, - } + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} syncCtx.Sync() + phase, message, _ := syncCtx.GetState() - assert.Equal(t, synccommon.OperationFailed, syncCtx.phase) - assert.Equal(t, 2, updatedCount) - assert.Equal(t, 2, deletedCount) + assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, "one or more synchronization tasks completed unsuccessfully", message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 1, deletedCount) + + _, err := syncCtx.getResource(&syncTask{liveObj: hook2}) + require.Error(t, err, "Expected resource to be deleted") + assert.True(t, apierrors.IsNotFound(err)) } func Test_syncContext_liveObj(t *testing.T) { @@ -1599,9 +1754,9 @@ func Test_setRunningPhase_runningHooks(t *testing.T) { var sc syncContext sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") - sc.setRunningPhase([]*syncTask{{targetObj: newHook(synccommon.HookTypeSyncFail)}}, false) + sc.setRunningPhase([]*syncTask{{targetObj: newHook("my-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyBeforeHookCreation)}}, false) - assert.Equal(t, "waiting for completion of hook /Pod/my-pod", sc.message) + assert.Equal(t, "waiting for completion of hook /Pod/my-hook", sc.message) } func Test_setRunningPhase_pendingDeletion(t *testing.T) { @@ -1613,7 +1768,7 @@ func Test_setRunningPhase_pendingDeletion(t *testing.T) { assert.Equal(t, "waiting for deletion of /Pod/my-pod and 2 more resources", sc.message) } -func TestSyncWaveHook(t *testing.T) { +func TestSync_SyncWaveHook(t *testing.T) { syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, false, false, false)) pod1 := testingutils.NewPod() pod1.SetName("pod-1") @@ -1684,15 +1839,21 @@ func TestSyncWaveHook(t *testing.T) { assert.True(t, called) } -func 
TestSyncWaveHookFail(t *testing.T) { +func TestSync_SyncWaveHookError(t *testing.T) { syncCtx := newTestSyncCtx(nil, WithOperationSettings(false, false, false, false)) pod1 := testingutils.NewPod() + pod1.SetNamespace(testingutils.FakeArgoCDNamespace) pod1.SetName("pod-1") + syncHook := newHook("sync-hook", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + syncFailHook := newHook("sync-fail-hook", synccommon.HookTypeSyncFail, synccommon.HookDeletePolicyBeforeHookCreation) + + syncCtx.dynamicIf = fake.NewSimpleDynamicClient(runtime.NewScheme()) syncCtx.resources = groupResources(ReconciliationResult{ Live: []*unstructured.Unstructured{nil}, Target: []*unstructured.Unstructured{pod1}, }) + syncCtx.hooks = []*unstructured.Unstructured{syncHook, syncFailHook} called := false syncCtx.syncWaveHook = func(_ synccommon.SyncPhase, _ int, _ bool) error { @@ -1702,9 +1863,17 @@ func TestSyncWaveHookFail(t *testing.T) { syncCtx.Sync() assert.True(t, called) phase, msg, results := syncCtx.GetState() - assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, synccommon.OperationError, phase) assert.Equal(t, "SyncWaveHook failed: intentional error", msg) - assert.Equal(t, synccommon.OperationRunning, results[0].HookPhase) + require.Len(t, results, 2) + + podResult := getResourceResult(results, kube.GetResourceKey(pod1)) + require.NotNil(t, podResult, "%s not found", kube.GetResourceKey(pod1)) + assert.Equal(t, synccommon.OperationRunning, podResult.HookPhase) + + hookResult := getResourceResult(results, kube.GetResourceKey(syncHook)) + require.NotNil(t, hookResult, "%s not found", kube.GetResourceKey(syncHook)) + assert.Equal(t, "Terminated", hookResult.Message) } func TestPruneLast(t *testing.T) { @@ -1810,19 +1979,20 @@ func TestSyncContext_GetDeleteOptions_WithPrunePropagationPolicy(t *testing.T) { assert.Equal(t, metav1.DeletePropagationBackground, *opts.PropagationPolicy) } -func TestSetOperationFailed(t *testing.T) { +func Test_executeSyncFailPhase(t *testing.T) { sc := syncContext{} sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") tasks := make([]*syncTask, 0) tasks = append(tasks, &syncTask{message: "namespace not found"}) - sc.setOperationFailed(nil, tasks, "one or more objects failed to apply") + sc.executeSyncFailPhase(nil, tasks, "one or more objects failed to apply") assert.Equal(t, "one or more objects failed to apply, reason: namespace not found", sc.message) + assert.Equal(t, synccommon.OperationFailed, sc.phase) } -func TestSetOperationFailedDuplicatedMessages(t *testing.T) { +func Test_executeSyncFailPhase_DuplicatedMessages(t *testing.T) { sc := syncContext{} sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") @@ -1830,18 +2000,53 @@ func TestSetOperationFailedDuplicatedMessages(t *testing.T) { tasks = append(tasks, &syncTask{message: "namespace not found"}) tasks = append(tasks, &syncTask{message: "namespace not found"}) - sc.setOperationFailed(nil, tasks, "one or more objects failed to apply") + sc.executeSyncFailPhase(nil, tasks, "one or more objects failed to apply") assert.Equal(t, "one or more objects failed to apply, reason: namespace not found", sc.message) + assert.Equal(t, synccommon.OperationFailed, sc.phase) } -func TestSetOperationFailedNoTasks(t *testing.T) { +func Test_executeSyncFailPhase_NoTasks(t *testing.T) { sc := syncContext{} sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") - sc.setOperationFailed(nil, 
nil, "one or more objects failed to apply") + sc.executeSyncFailPhase(nil, nil, "one or more objects failed to apply") assert.Equal(t, "one or more objects failed to apply", sc.message) + assert.Equal(t, synccommon.OperationFailed, sc.phase) +} + +func Test_executeSyncFailPhase_RunningHooks(t *testing.T) { + sc := syncContext{ + phase: synccommon.OperationRunning, + } + sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") + + tasks := make([]*syncTask, 0) + tasks = append(tasks, &syncTask{operationState: synccommon.OperationRunning}) + + sc.executeSyncFailPhase(tasks, nil, "one or more objects failed to apply") + + assert.Equal(t, synccommon.OperationRunning, sc.phase) +} + +func Test_executeSyncFailPhase_CompletedHooks(t *testing.T) { + sc := syncContext{ + phase: synccommon.OperationRunning, + } + sc.log = textlogger.NewLogger(textlogger.NewConfig()).WithValues("application", "fake-app") + + failed := make([]*syncTask, 0) + failed = append(failed, &syncTask{message: "task in error"}) + + tasks := make([]*syncTask, 0) + tasks = append(tasks, &syncTask{operationState: synccommon.OperationSucceeded}) + tasks = append(tasks, &syncTask{operationState: synccommon.OperationFailed, syncStatus: synccommon.ResultCodeSyncFailed, message: "failed to apply"}) + + sc.executeSyncFailPhase(tasks, failed, "one or more objects failed to apply") + + assert.Equal(t, "one or more objects failed to apply, reason: task in error\none or more SyncFail hooks failed, reason: failed to apply", sc.message) + assert.Equal(t, synccommon.OperationFailed, sc.phase) } func TestWaveReorderingOfPruneTasks(t *testing.T) { @@ -2082,15 +2287,15 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) { var phase synccommon.OperationPhase var msg string - var result []synccommon.ResourceSyncResult + var results []synccommon.ResourceSyncResult // 1st sync should prune only pod3 syncCtx.Sync() - phase, _, result = syncCtx.GetState() + phase, _, results = syncCtx.GetState() assert.Equal(t, synccommon.OperationRunning, phase) - assert.Len(t, result, 1) - assert.Equal(t, "pod-3", result[0].ResourceKey.Name) - assert.Equal(t, synccommon.ResultCodePruned, result[0].Status) + assert.Len(t, results, 1) + assert.Equal(t, "pod-3", results[0].ResourceKey.Name) + assert.Equal(t, synccommon.ResultCodePruned, results[0].Status) // simulate successful delete of pod3 syncCtx.resources = groupResources(ReconciliationResult{ @@ -2100,11 +2305,11 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) { // next sync should prune only pod2 syncCtx.Sync() - phase, _, result = syncCtx.GetState() + phase, _, results = syncCtx.GetState() assert.Equal(t, synccommon.OperationRunning, phase) - assert.Len(t, result, 2) - assert.Equal(t, "pod-2", result[1].ResourceKey.Name) - assert.Equal(t, synccommon.ResultCodePruned, result[1].Status) + assert.Len(t, results, 2) + assert.Equal(t, "pod-2", results[1].ResourceKey.Name) + assert.Equal(t, synccommon.ResultCodePruned, results[1].Status) // add delete timestamp on pod2 to simulate pending delete pod2.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) @@ -2112,10 +2317,10 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) { // next sync should wait for deletion of pod2 from cluster, // it should not move to next wave and prune pod1 syncCtx.Sync() - phase, msg, result = syncCtx.GetState() + phase, msg, results = syncCtx.GetState() assert.Equal(t, synccommon.OperationRunning, phase) assert.Equal(t, "waiting for deletion of /Pod/pod-2", msg) - assert.Len(t, result, 2) + 
assert.Len(t, results, 2) // simulate successful delete of pod2 syncCtx.resources = groupResources(ReconciliationResult{ @@ -2126,15 +2331,15 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) { // next sync should proceed with next wave // i.e deletion of pod1 syncCtx.Sync() - phase, _, result = syncCtx.GetState() + phase, _, results = syncCtx.GetState() assert.Equal(t, synccommon.OperationSucceeded, phase) - assert.Len(t, result, 3) - assert.Equal(t, "pod-3", result[0].ResourceKey.Name) - assert.Equal(t, "pod-2", result[1].ResourceKey.Name) - assert.Equal(t, "pod-1", result[2].ResourceKey.Name) - assert.Equal(t, synccommon.ResultCodePruned, result[0].Status) - assert.Equal(t, synccommon.ResultCodePruned, result[1].Status) - assert.Equal(t, synccommon.ResultCodePruned, result[2].Status) + assert.Len(t, results, 3) + assert.Equal(t, "pod-3", results[0].ResourceKey.Name) + assert.Equal(t, "pod-2", results[1].ResourceKey.Name) + assert.Equal(t, "pod-1", results[2].ResourceKey.Name) + assert.Equal(t, synccommon.ResultCodePruned, results[0].Status) + assert.Equal(t, synccommon.ResultCodePruned, results[1].Status) + assert.Equal(t, synccommon.ResultCodePruned, results[2].Status) } func BenchmarkSync(b *testing.B) { @@ -2262,3 +2467,314 @@ func TestNeedsClientSideApplyMigration(t *testing.T) { }) } } + +func TestTerminate(t *testing.T) { + obj := testingutils.NewPod() + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, + WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ + ResourceKey: kube.GetResourceKey(obj), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + }}, + metav1.Now(), + )) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{obj}, + Target: []*unstructured.Unstructured{obj}, + }) + syncCtx.hooks = []*unstructured.Unstructured{} + + syncCtx.Terminate() + assert.Equal(t, synccommon.OperationFailed, syncCtx.phase) + assert.Equal(t, "Operation terminated", syncCtx.message) +} + +func TestTerminate_Hooks_Running(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookSucceeded) + + obj := testingutils.NewPod() + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusProgressing, + hook2.GetName(): health.HealthStatusProgressing, + hook3.GetName(): health.HealthStatusProgressing, + })), + WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 0, + }, { + ResourceKey: kube.GetResourceKey(hook2), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 2, + }, { + ResourceKey: kube.GetResourceKey(obj), + HookPhase: synccommon.OperationRunning, + Status: 
synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 3, + }}, + metav1.Now(), + )) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{obj, hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{obj, nil, nil, nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) + syncCtx.dynamicIf = fakeDynamicClient + updatedCount := 0 + fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + // Removing the finalizers + updatedCount++ + return false, nil, nil + }) + deletedCount := 0 + fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + deletedCount++ + return false, nil, nil + }) + + syncCtx.Terminate() + phase, message, results := syncCtx.GetState() + assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, "Operation terminated", message) + require.Len(t, results, 4) + assert.Equal(t, kube.GetResourceKey(hook1), results[0].ResourceKey) + assert.Equal(t, synccommon.OperationFailed, results[0].HookPhase) + assert.Equal(t, "Terminated", results[0].Message) + assert.Equal(t, kube.GetResourceKey(hook2), results[1].ResourceKey) + assert.Equal(t, synccommon.OperationFailed, results[1].HookPhase) + assert.Equal(t, "Terminated", results[1].Message) + assert.Equal(t, kube.GetResourceKey(hook3), results[2].ResourceKey) + assert.Equal(t, synccommon.OperationFailed, results[2].HookPhase) + assert.Equal(t, "Terminated", results[2].Message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 3, deletedCount) +} + +func TestTerminate_Hooks_Running_Healthy(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookSucceeded) + + obj := testingutils.NewPod() + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusHealthy, + hook2.GetName(): health.HealthStatusHealthy, + hook3.GetName(): health.HealthStatusHealthy, + })), + WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 0, + }, { + ResourceKey: kube.GetResourceKey(hook2), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 2, + }, { + ResourceKey: kube.GetResourceKey(obj), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 3, + }}, + metav1.Now(), + )) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{obj, hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{obj, nil, nil, nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} + 
fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) + syncCtx.dynamicIf = fakeDynamicClient + updatedCount := 0 + fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + // Removing the finalizers + updatedCount++ + return false, nil, nil + }) + deletedCount := 0 + fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + deletedCount++ + return false, nil, nil + }) + + syncCtx.Terminate() + phase, message, results := syncCtx.GetState() + assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, "Operation terminated", message) + require.Len(t, results, 4) + assert.Equal(t, kube.GetResourceKey(hook1), results[0].ResourceKey) + assert.Equal(t, synccommon.OperationSucceeded, results[0].HookPhase) + assert.Equal(t, "test", results[0].Message) + assert.Equal(t, kube.GetResourceKey(hook2), results[1].ResourceKey) + assert.Equal(t, synccommon.OperationSucceeded, results[1].HookPhase) + assert.Equal(t, "test", results[1].Message) + assert.Equal(t, kube.GetResourceKey(hook3), results[2].ResourceKey) + assert.Equal(t, synccommon.OperationSucceeded, results[2].HookPhase) + assert.Equal(t, "test", results[2].Message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 1, deletedCount) + + _, err := syncCtx.getResource(&syncTask{liveObj: hook2}) + require.Error(t, err, "Expected resource to be deleted") + assert.True(t, apierrors.IsNotFound(err)) +} + +func TestTerminate_Hooks_Completed(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + hook2 := newHook("hook-2", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookFailed) + hook3 := newHook("hook-3", synccommon.HookTypeSync, synccommon.HookDeletePolicyHookSucceeded) + + obj := testingutils.NewPod() + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusHealthy, + hook2.GetName(): health.HealthStatusHealthy, + hook3.GetName(): health.HealthStatusHealthy, + })), + WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationSucceeded, + Message: "hook1 completed", + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 0, + }, { + ResourceKey: kube.GetResourceKey(hook2), + HookPhase: synccommon.OperationFailed, + Message: "hook2 failed", + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 1, + }, { + ResourceKey: kube.GetResourceKey(hook3), + HookPhase: synccommon.OperationError, + Message: "hook3 error", + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 2, + }, { + ResourceKey: kube.GetResourceKey(obj), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 3, + }}, + metav1.Now(), + )) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{obj, hook1, hook2, hook3}, + Target: []*unstructured.Unstructured{obj, nil, nil, nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hook1, hook2, hook3} + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1, hook2, hook3) + syncCtx.dynamicIf = fakeDynamicClient + 
updatedCount := 0 + fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + // Removing the finalizers + updatedCount++ + return false, nil, nil + }) + deletedCount := 0 + fakeDynamicClient.PrependReactor("delete", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + deletedCount++ + return false, nil, nil + }) + + syncCtx.Terminate() + phase, message, results := syncCtx.GetState() + assert.Equal(t, synccommon.OperationFailed, phase) + assert.Equal(t, "Operation terminated", message) + require.Len(t, results, 4) + assert.Equal(t, kube.GetResourceKey(hook1), results[0].ResourceKey) + assert.Equal(t, synccommon.OperationSucceeded, results[0].HookPhase) + assert.Equal(t, "hook1 completed", results[0].Message) + assert.Equal(t, kube.GetResourceKey(hook2), results[1].ResourceKey) + assert.Equal(t, synccommon.OperationFailed, results[1].HookPhase) + assert.Equal(t, "hook2 failed", results[1].Message) + assert.Equal(t, kube.GetResourceKey(hook3), results[2].ResourceKey) + assert.Equal(t, synccommon.OperationError, results[2].HookPhase) + assert.Equal(t, "hook3 error", results[2].Message) + assert.Equal(t, 3, updatedCount) + assert.Equal(t, 0, deletedCount) +} + +func TestTerminate_Hooks_Error(t *testing.T) { + hook1 := newHook("hook-1", synccommon.HookTypeSync, synccommon.HookDeletePolicyBeforeHookCreation) + + obj := testingutils.NewPod() + obj.SetNamespace(testingutils.FakeArgoCDNamespace) + + syncCtx := newTestSyncCtx(nil, + WithHealthOverride(resourceNameHealthOverride(map[string]health.HealthStatusCode{ + hook1.GetName(): health.HealthStatusHealthy, + })), + WithInitialState(synccommon.OperationRunning, "", []synccommon.ResourceSyncResult{{ + ResourceKey: kube.GetResourceKey(hook1), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 0, + }, { + ResourceKey: kube.GetResourceKey(obj), + HookPhase: synccommon.OperationRunning, + Status: synccommon.ResultCodeSynced, + SyncPhase: synccommon.SyncPhaseSync, + Order: 1, + }}, + metav1.Now(), + )) + syncCtx.resources = groupResources(ReconciliationResult{ + Live: []*unstructured.Unstructured{obj, hook1}, + Target: []*unstructured.Unstructured{obj, nil}, + }) + syncCtx.hooks = []*unstructured.Unstructured{hook1} + fakeDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), hook1) + syncCtx.dynamicIf = fakeDynamicClient + fakeDynamicClient.PrependReactor("update", "*", func(_ testcore.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, apierrors.NewInternalError(errors.New("update failed")) + }) + + syncCtx.Terminate() + phase, message, results := syncCtx.GetState() + assert.Equal(t, synccommon.OperationError, phase) + assert.Equal(t, "Operation termination had errors", message) + require.Len(t, results, 2) + assert.Equal(t, kube.GetResourceKey(hook1), results[0].ResourceKey) + assert.Equal(t, synccommon.OperationError, results[0].HookPhase) + assert.Contains(t, results[0].Message, "update failed") +} diff --git a/pkg/sync/sync_task_test.go b/pkg/sync/sync_task_test.go index 16825d0b4..de946f833 100644 --- a/pkg/sync/sync_task_test.go +++ b/pkg/sync/sync_task_test.go @@ -10,10 +10,6 @@ import ( testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing" ) -func newHook(hookType common.HookType) *unstructured.Unstructured { - return testingutils.Annotate(testingutils.NewPod(), "argocd.argoproj.io/hook", string(hookType)) -} - func 
Test_syncTask_hookType(t *testing.T) {
 type fields struct {
 phase common.SyncPhase
@@ -25,9 +21,9 @@ func Test_syncTask_hookType(t *testing.T) {
 want common.HookType
 }{
 {"Empty", fields{common.SyncPhaseSync, testingutils.NewPod()}, ""},
- {"PreSyncHook", fields{common.SyncPhasePreSync, newHook(common.HookTypePreSync)}, common.HookTypePreSync},
- {"SyncHook", fields{common.SyncPhaseSync, newHook(common.HookTypeSync)}, common.HookTypeSync},
- {"PostSyncHook", fields{common.SyncPhasePostSync, newHook(common.HookTypePostSync)}, common.HookTypePostSync},
+ {"PreSyncHook", fields{common.SyncPhasePreSync, newHook("hook", common.HookTypePreSync, common.HookDeletePolicyBeforeHookCreation)}, common.HookTypePreSync},
+ {"SyncHook", fields{common.SyncPhaseSync, newHook("hook", common.HookTypeSync, common.HookDeletePolicyBeforeHookCreation)}, common.HookTypeSync},
+ {"PostSyncHook", fields{common.SyncPhasePostSync, newHook("hook", common.HookTypePostSync, common.HookDeletePolicyBeforeHookCreation)}, common.HookTypePostSync},
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/utils/kube/convert_test.go b/pkg/utils/kube/convert_test.go
index 43e6ebe38..436761e16 100644
--- a/pkg/utils/kube/convert_test.go
+++ b/pkg/utils/kube/convert_test.go
@@ -3,12 +3,12 @@ package kube
 import (
 "testing"
- testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
-
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "sigs.k8s.io/yaml"
+
+ testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 )
 type testcase struct {
diff --git a/pkg/utils/kube/ctl_test.go b/pkg/utils/kube/ctl_test.go
index 11bb6c786..552339234 100644
--- a/pkg/utils/kube/ctl_test.go
+++ b/pkg/utils/kube/ctl_test.go
@@ -12,8 +12,9 @@ import (
 "github.com/stretchr/testify/assert"
 "k8s.io/klog/v2/textlogger"
- testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 "github.com/argoproj/gitops-engine/pkg/utils/tracing"
+
+ testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing"
 )
 var _ Kubectl = &KubectlCmd{}
diff --git a/pkg/utils/kube/kube_test.go b/pkg/utils/kube/kube_test.go
index e280bd391..f25276388 100644
--- a/pkg/utils/kube/kube_test.go
+++ b/pkg/utils/kube/kube_test.go
@@ -150,7 +150,7 @@ spec:
 - name: nginx
 image: nginx:1.7.9
 ports:
- - containerPort: 80
+ - containerPort: 80
 `)
 deployment := unstructured.Unstructured{}
 err := yaml.Unmarshal(manifest, &deployment)
diff --git a/pkg/utils/kube/kubetest/mock_resource_operations.go b/pkg/utils/kube/kubetest/mock_resource_operations.go
index 8f4242821..ae93c7a95 100644
--- a/pkg/utils/kube/kubetest/mock_resource_operations.go
+++ b/pkg/utils/kube/kubetest/mock_resource_operations.go
@@ -15,9 +15,10 @@ import (
 )
 type MockResourceOps struct {
- Commands map[string]KubectlOutput
- Events chan watch.Event
- DynamicClient dynamic.Interface
+ ExecuteForDryRun bool
+ Commands map[string]KubectlOutput
+ Events chan watch.Event
+ DynamicClient dynamic.Interface
 lastCommandPerResource map[kube.ResourceKey]string
 lastValidate bool
@@ -106,7 +107,10 @@ func (r *MockResourceOps) GetLastResourceCommand(key kube.ResourceKey) string {
 return r.lastCommandPerResource[key]
 }
-func (r *MockResourceOps) ApplyResource(_ context.Context, obj *unstructured.Unstructured, _ cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) {
+func (r *MockResourceOps) ApplyResource(_ context.Context, obj *unstructured.Unstructured, dryRun cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) {
+ if dryRun != cmdutil.DryRunNone && !r.ExecuteForDryRun {
+ return "", nil
+ }
 r.SetLastValidate(validate)
 r.SetLastServerSideApply(serverSideApply)
 r.SetLastServerSideApplyManager(manager)
@@ -120,7 +124,10 @@ func (r *MockResourceOps) ApplyResource(_ context.Context, obj *unstructured.Uns
 return command.Output, command.Err
 }
-func (r *MockResourceOps) ReplaceResource(_ context.Context, obj *unstructured.Unstructured, _ cmdutil.DryRunStrategy, force bool) (string, error) {
+func (r *MockResourceOps) ReplaceResource(_ context.Context, obj *unstructured.Unstructured, dryRun cmdutil.DryRunStrategy, force bool) (string, error) {
+ if dryRun != cmdutil.DryRunNone && !r.ExecuteForDryRun {
+ return "", nil
+ }
 r.SetLastForce(force)
 command, ok := r.Commands[obj.GetName()]
 r.SetLastResourceCommand(kube.GetResourceKey(obj), "replace")
@@ -131,7 +138,10 @@ func (r *MockResourceOps) ReplaceResource(_ context.Context, obj *unstructured.U
 return command.Output, command.Err
 }
-func (r *MockResourceOps) UpdateResource(_ context.Context, obj *unstructured.Unstructured, _ cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) {
+func (r *MockResourceOps) UpdateResource(_ context.Context, obj *unstructured.Unstructured, dryRun cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) {
+ if dryRun != cmdutil.DryRunNone && !r.ExecuteForDryRun {
+ return obj, nil
+ }
 r.SetLastResourceCommand(kube.GetResourceKey(obj), "update")
 command, ok := r.Commands[obj.GetName()]
 if !ok {
@@ -140,7 +150,10 @@ func (r *MockResourceOps) UpdateResource(_ context.Context, obj *unstructured.Un
 return obj, command.Err
 }
-func (r *MockResourceOps) CreateResource(_ context.Context, obj *unstructured.Unstructured, _ cmdutil.DryRunStrategy, _ bool) (string, error) {
+func (r *MockResourceOps) CreateResource(_ context.Context, obj *unstructured.Unstructured, dryRun cmdutil.DryRunStrategy, _ bool) (string, error) {
+ if dryRun != cmdutil.DryRunNone && !r.ExecuteForDryRun {
+ return "", nil
+ }
 r.SetLastResourceCommand(kube.GetResourceKey(obj), "create")
 command, ok := r.Commands[obj.GetName()]
 if !ok {
diff --git a/pkg/utils/testing/api_resources.go b/pkg/utils/testing/api_resources.go
new file mode 100644
index 000000000..ebfc53543
--- /dev/null
+++ b/pkg/utils/testing/api_resources.go
@@ -0,0 +1,81 @@
+package testing
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+var (
+ commonVerbs = []string{"create", "get", "list", "watch", "update", "patch", "delete", "deletecollection"}
+ subresourceVerbs = []string{"get", "update", "patch"}
+)
+
+// StaticAPIResources defines the common Kubernetes API resources that are usually returned by a DiscoveryClient
+var StaticAPIResources = []*metav1.APIResourceList{
+ {
+ GroupVersion: "v1",
+ APIResources: []metav1.APIResource{
+ {Name: "pods", SingularName: "pod", Namespaced: true, Kind: "Pod", Verbs: commonVerbs},
+ {Name: "pods/status", SingularName: "", Namespaced: true, Kind: "Pod", Verbs: subresourceVerbs},
+ {Name: "pods/log", SingularName: "", Namespaced: true, Kind: "Pod", Verbs: []string{"get"}},
+ {Name: "pods/exec", SingularName: "", Namespaced: true, Kind: "Pod", Verbs: []string{"create"}},
+ {Name: "services", SingularName: "service", Namespaced: true, Kind: "Service", Verbs: commonVerbs},
+ {Name: "services/status", SingularName: "", Namespaced: true, Kind: "Service", Verbs: subresourceVerbs},
+ {Name: "configmaps", SingularName: "configmap", Namespaced: true, Kind: "ConfigMap", Verbs: commonVerbs},
+ {Name: "secrets", SingularName: "secret", Namespaced: true, Kind: "Secret", Verbs: commonVerbs},
+ {Name: "namespaces", SingularName: "namespace", Namespaced: false, Kind: "Namespace", Verbs: commonVerbs},
+ {Name: "namespaces/status", SingularName: "", Namespaced: false, Kind: "Namespace", Verbs: subresourceVerbs},
+ {Name: "nodes", SingularName: "node", Namespaced: false, Kind: "Node", Verbs: []string{"get", "list", "watch"}},
+ {Name: "persistentvolumes", SingularName: "persistentvolume", Namespaced: false, Kind: "PersistentVolume", Verbs: commonVerbs},
+ {Name: "persistentvolumeclaims", SingularName: "persistentvolumeclaim", Namespaced: true, Kind: "PersistentVolumeClaim", Verbs: commonVerbs},
+ {Name: "persistentvolumeclaims/status", SingularName: "", Namespaced: true, Kind: "PersistentVolumeClaim", Verbs: subresourceVerbs},
+ {Name: "events", SingularName: "event", Namespaced: true, Kind: "Event", Verbs: []string{"create", "get", "list", "watch"}},
+ {Name: "serviceaccounts", SingularName: "serviceaccount", Namespaced: true, Kind: "ServiceAccount", Verbs: commonVerbs},
+ },
+ },
+ {
+ GroupVersion: "apps/v1",
+ APIResources: []metav1.APIResource{
+ {Name: "deployments", SingularName: "deployment", Namespaced: true, Kind: "Deployment", Verbs: commonVerbs},
+ {Name: "deployments/status", SingularName: "", Namespaced: true, Kind: "Deployment", Verbs: subresourceVerbs},
+ {Name: "deployments/scale", SingularName: "", Namespaced: true, Kind: "Scale", Verbs: subresourceVerbs},
+ {Name: "statefulsets", SingularName: "statefulset", Namespaced: true, Kind: "StatefulSet", Verbs: commonVerbs},
+ {Name: "statefulsets/status", SingularName: "", Namespaced: true, Kind: "StatefulSet", Verbs: subresourceVerbs},
+ {Name: "statefulsets/scale", SingularName: "", Namespaced: true, Kind: "Scale", Verbs: subresourceVerbs},
+ {Name: "daemonsets", SingularName: "daemonset", Namespaced: true, Kind: "DaemonSet", Verbs: commonVerbs},
+ {Name: "daemonsets/status", SingularName: "", Namespaced: true, Kind: "DaemonSet", Verbs: subresourceVerbs},
+ {Name: "replicasets", SingularName: "replicaset", Namespaced: true, Kind: "ReplicaSet", Verbs: commonVerbs},
+ {Name: "replicasets/status", SingularName: "", Namespaced: true, Kind: "ReplicaSet", Verbs: subresourceVerbs},
+ },
+ },
+ {
+ GroupVersion: "batch/v1",
+ APIResources: []metav1.APIResource{
+ {Name: "jobs", SingularName: "job", Namespaced: true, Kind: "Job", Verbs: commonVerbs},
+ {Name: "jobs/status", SingularName: "", Namespaced: true, Kind: "Job", Verbs: subresourceVerbs},
+ {Name: "cronjobs", SingularName: "cronjob", Namespaced: true, Kind: "CronJob", Verbs: commonVerbs},
+ {Name: "cronjobs/status", SingularName: "", Namespaced: true, Kind: "CronJob", Verbs: subresourceVerbs},
+ },
+ },
+ {
+ GroupVersion: "rbac.authorization.k8s.io/v1",
+ APIResources: []metav1.APIResource{
+ {Name: "roles", SingularName: "role", Namespaced: true, Kind: "Role", Verbs: commonVerbs},
+ {Name: "rolebindings", SingularName: "rolebinding", Namespaced: true, Kind: "RoleBinding", Verbs: commonVerbs},
+ {Name: "clusterroles", SingularName: "clusterrole", Namespaced: false, Kind: "ClusterRole", Verbs: commonVerbs},
+ {Name: "clusterrolebindings", SingularName: "clusterrolebinding", Namespaced: false, Kind: "ClusterRoleBinding", Verbs: commonVerbs},
+ },
+ },
+ {
+ GroupVersion: "networking.k8s.io/v1",
+ APIResources: []metav1.APIResource{
+ {Name: "ingresses", SingularName: "ingress", Namespaced: true, Kind: "Ingress", Verbs: commonVerbs},
+ {Name: "ingresses/status", SingularName: "", Namespaced: true, Kind: "Ingress", Verbs: subresourceVerbs},
+ {Name: "networkpolicies", SingularName: "networkpolicy", Namespaced: true, Kind: "NetworkPolicy", Verbs: commonVerbs},
+ },
+ },
+ {
+ GroupVersion: "policy/v1",
+ APIResources: []metav1.APIResource{
+ {Name: "poddisruptionbudgets", SingularName: "poddisruptionbudget", Namespaced: true, Kind: "PodDisruptionBudget", Verbs: commonVerbs},
+ {Name: "poddisruptionbudgets/status", SingularName: "", Namespaced: true, Kind: "PodDisruptionBudget", Verbs: subresourceVerbs},
+ },
+ },
+}