Commit 20164cb

[wsman-mk2] Simplify condition checking (#17961)
1 parent 1a663ac commit 20164cb

8 files changed: +44 -42 lines


components/ws-daemon/pkg/controller/workspace_controller.go (+3 -3)

@@ -235,15 +235,15 @@ func (wsc *WorkspaceController) handleWorkspaceStop(ctx context.Context, ws *wor
         return ctrl.Result{}, fmt.Errorf("workspace content was never ready")
     }
 
-    if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) {
+    if ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete) {
         return ctrl.Result{}, nil
     }
 
-    if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) {
+    if ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupFailure) {
         return ctrl.Result{}, nil
     }
 
-    if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionAborted)) {
+    if ws.IsConditionTrue(workspacev1.WorkspaceConditionAborted) {
         span.LogKV("event", "workspace was aborted")
         return ctrl.Result{}, nil
     }

components/ws-manager-api/go/crd/v1/workspace_types.go (+4)

@@ -433,6 +433,10 @@ func (w *Workspace) IsHeadless() bool {
     return w.Spec.Type != WorkspaceTypeRegular
 }
 
+func (w *Workspace) IsConditionTrue(condition WorkspaceCondition) bool {
+    return wsk8s.ConditionPresentAndTrue(w.Status.Conditions, string(condition))
+}
+
 func init() {
     SchemeBuilder.Register(&Workspace{}, &WorkspaceList{})
 }
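
The new receiver method above is the only functional addition in this commit; every other file simply migrates its call sites to it. A minimal sketch of the resulting call-site pattern (the helper name backupFinished is illustrative and not part of the commit):

package example

import (
	workspacev1 "github.com/gitpod-io/gitpod/ws-manager/api/crd/v1"
)

// backupFinished is a hypothetical call site showing the simplification:
// the condition is checked through the Workspace receiver instead of passing
// the conditions slice and a stringified condition name to the wsk8s helper.
func backupFinished(ws *workspacev1.Workspace) bool {
	// Before: wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete))
	// After:
	return ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete)
}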

components/ws-manager-mk2/controllers/metrics.go (+8 -8)

@@ -164,17 +164,17 @@ func (m *controllerMetrics) countWorkspaceStop(log *logr.Logger, ws *workspacev1
     var reason string
     if c := wsk8s.GetCondition(ws.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)); c != nil {
         reason = StopReasonFailed
-        if !wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionEverReady)) {
+        if !ws.IsConditionTrue(workspacev1.WorkspaceConditionEverReady) {
             // Don't record 'failed' if there was a start failure.
             reason = StopReasonStartFailure
         } else if strings.Contains(c.Message, "Pod ephemeral local storage usage exceeds the total limit of containers") {
             reason = StopReasonOutOfSpace
         }
-    } else if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionAborted)) {
+    } else if ws.IsConditionTrue(workspacev1.WorkspaceConditionAborted) {
         reason = StopReasonAborted
-    } else if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionTimeout)) {
+    } else if ws.IsConditionTrue(workspacev1.WorkspaceConditionTimeout) {
         reason = StopReasonTimeout
-    } else if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionClosed)) {
+    } else if ws.IsConditionTrue(workspacev1.WorkspaceConditionClosed) {
         reason = StopReasonTabClosed
     } else {
         reason = StopReasonRegular

@@ -278,10 +278,10 @@ func newMetricState(ws *workspacev1.Workspace) metricState {
         recordedStartTime:       ws.Status.Phase == workspacev1.WorkspacePhaseRunning,
         recordedInitFailure:     wsk8s.ConditionWithStatusAndReason(ws.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, workspacev1.ReasonInitializationFailure),
         recordedStartFailure:    ws.Status.Phase == workspacev1.WorkspacePhaseStopped && isStartFailure(ws),
-        recordedFailure:         wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)),
-        recordedContentReady:    wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady)),
-        recordedBackupFailed:    wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)),
-        recordedBackupCompleted: wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)),
+        recordedFailure:         ws.IsConditionTrue(workspacev1.WorkspaceConditionFailed),
+        recordedContentReady:    ws.IsConditionTrue(workspacev1.WorkspaceConditionContentReady),
+        recordedBackupFailed:    ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupFailure),
+        recordedBackupCompleted: ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete),
     }
 }

components/ws-manager-mk2/controllers/status.go (+13 -13)

@@ -109,14 +109,14 @@ func (r *WorkspaceReconciler) updateWorkspaceStatus(ctx context.Context, workspa
         workspace.Status.Phase = *phase
     }
 
-    if failure != "" && !wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
+    if failure != "" && !workspace.IsConditionTrue(workspacev1.WorkspaceConditionFailed) {
         // workspaces can fail only once - once there is a failed condition set, stick with it
         log.Info("workspace failed", "workspace", workspace.Name, "reason", failure)
         workspace.Status.SetCondition(workspacev1.NewWorkspaceConditionFailed(failure))
         r.Recorder.Event(workspace, corev1.EventTypeWarning, "Failed", failure)
     }
 
-    if workspace.IsHeadless() && !wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionsHeadlessTaskFailed)) {
+    if workspace.IsHeadless() && !workspace.IsConditionTrue(workspacev1.WorkspaceConditionsHeadlessTaskFailed) {
         for _, cs := range pod.Status.ContainerStatuses {
             if cs.State.Terminated != nil && cs.State.Terminated.Message != "" {
                 workspace.Status.SetCondition(workspacev1.NewWorkspaceConditionHeadlessTaskFailed(cs.State.Terminated.Message))

@@ -156,14 +156,14 @@ func (r *WorkspaceReconciler) updateWorkspaceStatus(ctx context.Context, workspa
         }
 
     case pod.Status.Phase == corev1.PodRunning:
-        everReady := wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionEverReady))
+        everReady := workspace.IsConditionTrue(workspacev1.WorkspaceConditionEverReady)
         if everReady {
            // If the workspace has been ready before, stay in a Running state, even
            // if the workspace container is not ready anymore. This is to avoid the workspace
            // moving back to Initializing and becoming unusable.
            workspace.Status.Phase = workspacev1.WorkspacePhaseRunning
         } else {
-            contentReady := wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady))
+            contentReady := workspace.IsConditionTrue(workspacev1.WorkspaceConditionContentReady)
             var ideReady bool
             for _, cs := range pod.Status.ContainerStatuses {
                 if cs.Ready {

@@ -176,7 +176,7 @@ func (r *WorkspaceReconciler) updateWorkspaceStatus(ctx context.Context, workspa
             if ready {
                 // workspace is ready - hence content init is done
                 workspace.Status.Phase = workspacev1.WorkspacePhaseRunning
-                if !wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionEverReady)) {
+                if !workspace.IsConditionTrue(workspacev1.WorkspaceConditionEverReady) {
                     workspace.Status.SetCondition(workspacev1.NewWorkspaceConditionEverReady())
                 }
             } else {

@@ -186,7 +186,7 @@ func (r *WorkspaceReconciler) updateWorkspaceStatus(ctx context.Context, workspa
         }
 
     case workspace.IsHeadless() && (pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed):
-        if pod.Status.Phase == corev1.PodSucceeded && !wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionEverReady)) {
+        if pod.Status.Phase == corev1.PodSucceeded && !workspace.IsConditionTrue(workspacev1.WorkspaceConditionEverReady) {
             // Fix for Prebuilds that instantly succeed (e.g. empty task), sometimes we don't observe the
             // workspace `Running` phase for these, and never had the opportunity to add the EverReady condition.
             // This would then cause a "start failure" in the metrics. So we retroactively add the EverReady

@@ -229,7 +229,7 @@ func (r *WorkspaceReconciler) checkNodeDisappeared(ctx context.Context, workspac
     }
 
     // If NodeDisappeared is already set, return early, we've already made the below checks previously.
-    if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionNodeDisappeared)) {
+    if workspace.IsConditionTrue(workspacev1.WorkspaceConditionNodeDisappeared) {
         return nil
     }
 

@@ -245,17 +245,17 @@ func (r *WorkspaceReconciler) checkNodeDisappeared(ctx context.Context, workspac
 }
 
 func isDisposalFinished(ws *workspacev1.Workspace) bool {
-    return wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) ||
-        wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) ||
-        wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionAborted)) ||
+    return ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete) ||
+        ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupFailure) ||
+        ws.IsConditionTrue(workspacev1.WorkspaceConditionAborted) ||
         // Nothing to dispose if content wasn't ready.
-        !wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady)) ||
+        !ws.IsConditionTrue(workspacev1.WorkspaceConditionContentReady) ||
         // Can't dispose if node disappeared.
-        wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionNodeDisappeared)) ||
+        ws.IsConditionTrue(workspacev1.WorkspaceConditionNodeDisappeared) ||
         // Image builds have nothing to dispose.
         ws.Spec.Type == workspacev1.WorkspaceTypeImageBuild ||
         // headless workspaces that failed do not need to be backed up
-        wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionsHeadlessTaskFailed))
+        ws.IsConditionTrue(workspacev1.WorkspaceConditionsHeadlessTaskFailed)
 }
 
 // extractFailure returns a pod failure reason and possibly a phase. If phase is nil then

components/ws-manager-mk2/controllers/timeout_controller.go (+3 -4)

@@ -19,7 +19,6 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/controller"
     "sigs.k8s.io/controller-runtime/pkg/log"
 
-    wsk8s "github.com/gitpod-io/gitpod/common-go/kubernetes"
     "github.com/gitpod-io/gitpod/common-go/util"
     wsactivity "github.com/gitpod-io/gitpod/ws-manager-mk2/pkg/activity"
     "github.com/gitpod-io/gitpod/ws-manager-mk2/pkg/maintenance"

@@ -81,7 +80,7 @@ func (r *TimeoutReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
         return ctrl.Result{}, client.IgnoreNotFound(err)
     }
 
-    if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionTimeout)) {
+    if workspace.IsConditionTrue(workspacev1.WorkspaceConditionTimeout) {
         // Workspace has already been marked as timed out.
         // Return and don't requeue another reconciliation.
         return ctrl.Result{}, nil

@@ -159,7 +158,7 @@ func (r *TimeoutReconciler) isWorkspaceTimedOut(ws *workspacev1.Workspace) (reas
 
     start := ws.ObjectMeta.CreationTimestamp.Time
     lastActivity := r.activity.GetLastActivity(ws)
-    isClosed := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionClosed))
+    isClosed := ws.IsConditionTrue(workspacev1.WorkspaceConditionClosed)
 
     switch phase {
     case workspacev1.WorkspacePhasePending:

@@ -213,7 +212,7 @@
         return decide(*lastActivity, timeout, activity)
 
     case workspacev1.WorkspacePhaseStopping:
-        if isWorkspaceBeingDeleted(ws) && !wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) {
+        if isWorkspaceBeingDeleted(ws) && !ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete) {
             // Beware: we apply the ContentFinalization timeout only to workspaces which are currently being deleted.
             // We basically don't expect a workspace to be in content finalization before it's been deleted.
             return decide(ws.DeletionTimestamp.Time, timeouts.ContentFinalization, activityBackup)

components/ws-manager-mk2/controllers/workspace_controller.go (+11 -11)

@@ -241,11 +241,11 @@ func (r *WorkspaceReconciler) actOnStatus(ctx context.Context, workspace *worksp
 
     switch {
     // if there is a pod, and it's failed, delete it
-    case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) && !isPodBeingDeleted(pod):
+    case workspace.IsConditionTrue(workspacev1.WorkspaceConditionFailed) && !isPodBeingDeleted(pod):
         return r.deleteWorkspacePod(ctx, pod, "workspace failed")
 
     // if the pod was stopped by request, delete it
-    case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionStoppedByRequest)) && !isPodBeingDeleted(pod):
+    case workspace.IsConditionTrue(workspacev1.WorkspaceConditionStoppedByRequest) && !isPodBeingDeleted(pod):
         var gracePeriodSeconds *int64
         if c := wsk8s.GetCondition(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionStoppedByRequest)); c != nil {
             if dt, err := time.ParseDuration(c.Message); err == nil {

@@ -263,11 +263,11 @@ func (r *WorkspaceReconciler) actOnStatus(ctx context.Context, workspace *worksp
         }
 
     // if the node disappeared, delete the pod.
-    case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionNodeDisappeared)) && !isPodBeingDeleted(pod):
+    case workspace.IsConditionTrue(workspacev1.WorkspaceConditionNodeDisappeared) && !isPodBeingDeleted(pod):
         return r.deleteWorkspacePod(ctx, pod, "node disappeared")
 
     // if the workspace timed out, delete it
-    case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionTimeout)) && !isPodBeingDeleted(pod):
+    case workspace.IsConditionTrue(workspacev1.WorkspaceConditionTimeout) && !isPodBeingDeleted(pod):
         return r.deleteWorkspacePod(ctx, pod, "timed out")
 
     // if the content initialization failed, delete the pod

@@ -323,23 +323,23 @@ func (r *WorkspaceReconciler) updateMetrics(ctx context.Context, workspace *work
         lastState.recordedInitFailure = true
     }
 
-    if !lastState.recordedFailure && wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
+    if !lastState.recordedFailure && workspace.IsConditionTrue(workspacev1.WorkspaceConditionFailed) {
         r.metrics.countWorkspaceFailure(&log, workspace)
         lastState.recordedFailure = true
     }
 
-    if !lastState.recordedContentReady && wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady)) {
+    if !lastState.recordedContentReady && workspace.IsConditionTrue(workspacev1.WorkspaceConditionContentReady) {
         r.metrics.countTotalRestores(&log, workspace)
         lastState.recordedContentReady = true
     }
 
-    if !lastState.recordedBackupFailed && wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) {
+    if !lastState.recordedBackupFailed && workspace.IsConditionTrue(workspacev1.WorkspaceConditionBackupFailure) {
         r.metrics.countTotalBackups(&log, workspace)
         r.metrics.countTotalBackupFailures(&log, workspace)
         lastState.recordedBackupFailed = true
     }
 
-    if !lastState.recordedBackupCompleted && wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) {
+    if !lastState.recordedBackupCompleted && workspace.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete) {
         r.metrics.countTotalBackups(&log, workspace)
         lastState.recordedBackupCompleted = true
     }

@@ -368,12 +368,12 @@ func (r *WorkspaceReconciler) updateMetrics(ctx context.Context, workspace *work
 
 func isStartFailure(ws *workspacev1.Workspace) bool {
     // Consider workspaces that never became ready as start failures.
-    everReady := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionEverReady))
+    everReady := ws.IsConditionTrue(workspacev1.WorkspaceConditionEverReady)
     // Except for aborted prebuilds, as they can get aborted before becoming ready, which shouldn't be counted
     // as a start failure.
-    isAborted := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionAborted))
+    isAborted := ws.IsConditionTrue(workspacev1.WorkspaceConditionAborted)
     // Also ignore workspaces that are requested to be stopped before they became ready.
-    isStoppedByRequest := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionStoppedByRequest))
+    isStoppedByRequest := ws.IsConditionTrue(workspacev1.WorkspaceConditionStoppedByRequest)
     return !everReady && !isAborted && !isStoppedByRequest
 }

components/ws-manager-mk2/pkg/activity/activity.go (+1 -2)

@@ -8,7 +8,6 @@ import (
     "sync"
     "time"
 
-    wsk8s "github.com/gitpod-io/gitpod/common-go/kubernetes"
     workspacev1 "github.com/gitpod-io/gitpod/ws-manager/api/crd/v1"
 )
 

@@ -38,7 +37,7 @@ func (w *WorkspaceActivity) GetLastActivity(ws *workspacev1.Workspace) *time.Tim
 
     // In case we don't have a record of the workspace's last activity, check for the FirstUserActivity condition
     // to see if the lastActivity got lost on a manager restart.
-    if wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionFirstUserActivity)) {
+    if ws.IsConditionTrue(workspacev1.WorkspaceConditionFirstUserActivity) {
         // Manager was restarted, consider the workspace's last activity to be the time the manager restarted.
         return &w.ManagerStartedAt
     }

components/ws-manager-mk2/service/manager.go (+1 -1)

@@ -495,7 +495,7 @@ func (wsm *WorkspaceManagerServer) MarkActive(ctx context.Context, req *wsmanapi
 
     // We do however maintain the the "closed" flag as condition on the workspace. This flag should not change
     // very often and provides a better UX if it persists across ws-manager restarts.
-    isMarkedClosed := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionClosed))
+    isMarkedClosed := ws.IsConditionTrue(workspacev1.WorkspaceConditionClosed)
     if req.Closed && !isMarkedClosed {
         err = wsm.modifyWorkspace(ctx, req.Id, true, func(ws *workspacev1.Workspace) error {
             ws.Status.SetCondition(workspacev1.NewWorkspaceConditionClosed(metav1.ConditionTrue, "MarkActiveRequest"))
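
With every call site migrated, the helper's behaviour can be pinned down with a small test. The sketch below is not part of the commit; it assumes Workspace.Status.Conditions is the []metav1.Condition slice that the wsk8s helpers operate on, and that a zero-value Workspace literal is valid for this purpose.

package v1_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workspacev1 "github.com/gitpod-io/gitpod/ws-manager/api/crd/v1"
)

// TestIsConditionTrue is a hypothetical table-driven test sketch for the new helper.
func TestIsConditionTrue(t *testing.T) {
	tests := []struct {
		name   string
		set    bool
		status metav1.ConditionStatus
		want   bool
	}{
		{name: "condition absent", set: false, want: false},
		{name: "condition present and true", set: true, status: metav1.ConditionTrue, want: true},
		{name: "condition present but false", set: true, status: metav1.ConditionFalse, want: false},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			ws := &workspacev1.Workspace{}
			if tc.set {
				// Assumption: conditions are plain metav1.Condition values.
				ws.Status.Conditions = []metav1.Condition{{
					Type:   string(workspacev1.WorkspaceConditionBackupComplete),
					Status: tc.status,
				}}
			}
			if got := ws.IsConditionTrue(workspacev1.WorkspaceConditionBackupComplete); got != tc.want {
				t.Errorf("IsConditionTrue() = %v, want %v", got, tc.want)
			}
		})
	}
}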
