From 8a2a2b81f0c2e3f496ba469bfb614bbf63836661 Mon Sep 17 00:00:00 2001 From: Jerod <7307833+xjerod@users.noreply.github.com> Date: Thu, 2 Apr 2026 07:24:22 -0700 Subject: [PATCH 1/4] selfhosted/controlplane: new doc for how to manually enable scylladb auth (#316) These are the manual steps for enabling intra-cluster auth for scylladb in selfhosted environments. --- .../SELFHOSTED_scylladb_authentication.md | 163 ++++++++++++++++++ 1 file changed, 163 insertions(+) create mode 100644 charts/controlplane/docs/SELFHOSTED_scylladb_authentication.md diff --git a/charts/controlplane/docs/SELFHOSTED_scylladb_authentication.md b/charts/controlplane/docs/SELFHOSTED_scylladb_authentication.md new file mode 100644 index 00000000..6dd4cd30 --- /dev/null +++ b/charts/controlplane/docs/SELFHOSTED_scylladb_authentication.md @@ -0,0 +1,163 @@ +# ScyllaDB Authentication for Selfhosted Deployments + +This guide explains how to enable CQL authentication on the ScyllaDB cluster deployed by the controlplane chart and configure the queue service to authenticate. + +## Prerequisites + +- ScyllaDB CRDs installed ([`scripts/install-scylla-crds.sh`](../scripts/install-scylla-crds.sh)) +- Control plane namespace created (e.g. `union-cp`) +- The service secret already exists with the Postgres `pass.txt` key (see main deployment guide) + +## Overview + +By default, ScyllaDB runs with `developerMode: true`, which disables authentication entirely. Enabling authentication requires three things: + +1. A ConfigMap that tells ScyllaDB to use `PasswordAuthenticator` +2. Changing the default superuser password after first boot +3. 
Configuring the queue service with the matching credentials + +## Step 1: Choose a password + +Generate or choose a password for the ScyllaDB `cassandra` superuser: + +```bash +# Generate a random 32-character password +SCYLLA_PASSWORD=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | head -c 32) +echo "$SCYLLA_PASSWORD" # Save this — you'll need it in steps 2 and 6 +``` + +## Step 2: Add the password to the service secret + +Add a `scylla-password` key to the existing service secret (the one referenced by `global.KUBERNETES_SECRET_NAME`, typically `union-controlplane-secrets`). Note that `kubectl apply` replaces **all** keys in the Secret, so you must re-supply the existing Postgres `pass.txt` value alongside the new key — do not leave it empty, or the Postgres credential will be wiped: + +```bash +kubectl create secret generic union-controlplane-secrets \ + --from-literal=pass.txt='<existing postgres password>' \ + --from-literal=scylla-password="$SCYLLA_PASSWORD" \ + -n union-cp --dry-run=client -o yaml | kubectl apply -f - +``` + +The queue service mounts this secret at `/etc/secrets/union/`. The `scylla-password` key becomes the file `/etc/secrets/union/scylla-password`, which the queue service reads at startup. + +## Step 3: Configure Helm values + +In your customer overrides file, disable developer mode, reference the auth ConfigMap, and set the queue service credentials: + +```yaml +scylla: + developerMode: false + racks: + - name: rack1 + scyllaConfig: scylla-config # references the ConfigMap created by the chart + members: 3 + storage: + capacity: 100Gi + storageClassName: "scylladb" + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + placement: + nodeAffinity: {} + tolerations: [] + +services: + queue: + configMap: + queue: + db: + hosts: + - "scylla-client.union-cp.svc.cluster.local" + threadCount: 64 + type: cql + username: "cassandra" + passwordName: "union/scylla-password" +``` + +The `passwordName` value `union/scylla-password` tells the queue service to read the password from `/etc/secrets/union/scylla-password` (the secret file prefix `/etc/secrets` is combined with the `passwordName`). 
+ +## Step 4: Create the ScyllaDB authentication ConfigMap + +The `scyllaConfig: scylla-config` field in the rack definition references a ConfigMap that provides ScyllaDB configuration overrides. Create it to enable `PasswordAuthenticator` and `CassandraAuthorizer`: + +```bash +kubectl apply -n union-cp -f - <<'EOF' +apiVersion: v1 +kind: ConfigMap +metadata: + name: scylla-config +data: + scylla.yaml: | + authenticator: PasswordAuthenticator + authorizer: CassandraAuthorizer +EOF +``` + +This ConfigMap must exist **before** the ScyllaDB cluster starts. Without it, ScyllaDB uses `AllowAllAuthenticator` regardless of `developerMode`. + +## Step 5: Deploy the control plane + +```bash +helm upgrade --install unionai-controlplane unionai/controlplane \ + --namespace union-cp \ + -f values.aws.selfhosted-intracluster.yaml \ + -f values.selfhosted-customer.yaml \ + --timeout 15m \ + --wait +``` + +## Step 6: Change the default superuser password + +After the ScyllaDB cluster is healthy, the default `cassandra` superuser has password `cassandra`. 
You must change it to match the password stored in the secret: + +```bash +# Wait for the cluster to be ready +kubectl wait --for=condition=Available scyllacluster/scylla -n union-cp --timeout=300s + +# Connect and change the password +kubectl exec -it scylla-dc1-rack1-0 -n union-cp -c scylla -- \ + cqlsh localhost -u cassandra -p cassandra \ + -e "ALTER USER cassandra WITH PASSWORD '$SCYLLA_PASSWORD';" +``` + +Verify the new password works: + +```bash +kubectl exec -it scylla-dc1-rack1-0 -n union-cp -c scylla -- \ + cqlsh localhost -u cassandra -p "$SCYLLA_PASSWORD" \ + -e "DESCRIBE CLUSTER;" +``` + +## Step 7: Restart the queue service + +If the queue service started before the password was changed, restart it so it picks up the working credentials: + +```bash +kubectl rollout restart deployment/queue -n union-cp +``` + +## Verification + +Confirm the queue service is connected and authenticated: + +```bash +kubectl logs -n union-cp deploy/queue | grep -i "cql\|scylla\|session" +``` + +The queue service should log a successful session creation without authentication errors. + +## Troubleshooting + +**Queue service fails with "failed to get cql db password"** +- Verify the secret has the `scylla-password` key: `kubectl get secret union-controlplane-secrets -n union-cp -o jsonpath='{.data.scylla-password}' | base64 -d` +- Verify `passwordName` in the values matches the secret key path (`union/scylla-password`) + +**Queue service connects but gets "AuthenticationError"** +- The password in the secret doesn't match the ScyllaDB superuser password. Re-run Step 6 (the `ALTER USER` command) or update the secret. + +**ScyllaDB pods crash-loop after disabling developerMode** +- Ensure the `scylla-config` ConfigMap exists and contains valid `scylla.yaml` (see Step 4). +- Check that sysctl `fs.aio-max-nr=30000000` is allowed on your nodes (required outside developer mode). 
From 5726de5ee3aa350c86b39af55e6acc9b8d189879 Mon Sep 17 00:00:00 2001 From: Michael Hotan Date: Fri, 3 Apr 2026 09:21:39 +1100 Subject: [PATCH 2/4] Provide explicit externalClient.forwardHeaders defaults (#317) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Move `externalClient.forwardHeaders` out of the commented External example block into explicit defaults. Customers see what headers are forwarded to their external authorization server without reading Go code. Prevents accidentally overriding defaults with a partial list when customizing. ## Test Plan - [x] `make generate-expected` — snapshots regenerated - [x] All test variants include explicit forwardHeaders in rendered configmaps ## Rollout Plan (required) Safe to merge — adds explicit values that match existing Go defaults. No behavior change. ## Rollback Plan (required) Revert PR. Go code `DefaultForwardHeaders` continues to provide the same defaults. ## Checklist * [x] Added tests * [ ] Ran manual integration tests * [ ] Ran a deploy dry run * [ ] Added logging and metrics * [ ] Updated dashboards and alerts * [ ] Updated documentation --- charts/controlplane/values.yaml | 17 +++++++++++------ .../controlplane.aws.billing-enable.yaml | 4 ++++ tests/generated/controlplane.aws.yaml | 4 ++++ tests/generated/controlplane.userclouds.yaml | 4 ++++ 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/charts/controlplane/values.yaml b/charts/controlplane/values.yaml index 2e529188..747ec036 100644 --- a/charts/controlplane/values.yaml +++ b/charts/controlplane/values.yaml @@ -423,17 +423,22 @@ services: # # Max delay for gRPC backoff between retries. # # maxBackoffDelay: "10s" # - # # Incoming gRPC metadata keys to forward to the external server. - # # Default: ["authorization", "flyte-authorization"] - # # forwardHeaders: - # # - authorization - # # - flyte-authorization - # # # If true, allow requests when the external server is unreachable. 
# # If false (default), deny on error. # failOpen: false authorizer: type: "Noop" + # --- External client defaults --- + # Headers forwarded from the authorizer to the external server on + # each Authorize() call. These carry the caller's OIDC token so the + # external server can inspect JWT claims for authorization decisions. + # Always included even when type is "Noop" or "UserClouds" (ignored + # by those backends). Customers adding External authorization should + # see these explicitly rather than relying on hidden Go defaults. + externalClient: + forwardHeaders: + - authorization + - flyte-authorization sharedService: connectPort: 8081 metrics: diff --git a/tests/generated/controlplane.aws.billing-enable.yaml b/tests/generated/controlplane.aws.billing-enable.yaml index 3e30434c..692ee34e 100644 --- a/tests/generated/controlplane.aws.billing-enable.yaml +++ b/tests/generated/controlplane.aws.billing-enable.yaml @@ -613,6 +613,10 @@ data: grpcConfig: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true + externalClient: + forwardHeaders: + - authorization + - flyte-authorization type: Noop cache: identity: diff --git a/tests/generated/controlplane.aws.yaml b/tests/generated/controlplane.aws.yaml index df2420df..a0f4fa5d 100644 --- a/tests/generated/controlplane.aws.yaml +++ b/tests/generated/controlplane.aws.yaml @@ -613,6 +613,10 @@ data: grpcConfig: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true + externalClient: + forwardHeaders: + - authorization + - flyte-authorization type: Noop cache: identity: diff --git a/tests/generated/controlplane.userclouds.yaml b/tests/generated/controlplane.userclouds.yaml index 4619508d..c1509b4f 100644 --- a/tests/generated/controlplane.userclouds.yaml +++ b/tests/generated/controlplane.userclouds.yaml @@ -613,6 +613,10 @@ data: grpcConfig: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true + externalClient: + forwardHeaders: + - authorization + - flyte-authorization type: Noop 
cache: identity: From 3a2fa5d8026b1aefc3c19dfb4242afa47b4c5f78 Mon Sep 17 00:00:00 2001 From: Michael Hotan Date: Fri, 3 Apr 2026 11:18:12 +1100 Subject: [PATCH 3/4] Add useExternalIdentity config via global.USE_EXTERNAL_IDENTITY (FAB-189) (#320) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Add `useExternalIdentity` config to the authorizer section across all services. When `global.USE_EXTERNAL_IDENTITY` is `"true"`, all services construct `ExternalIdentity` for subjects without the `identitytype` JWT claim. **Why a global?** flyteadmin and cacheservice configs render inside the flyte-core subchart scope where `.Values.services` is not accessible. Using `global.USE_EXTERNAL_IDENTITY` makes the value available across all template scopes. A future release removing the flyte-core subchart dependency will allow direct derivation from `authorizer.type`. Verified on all 9 services: flyteadmin, cacheservice, authorizer, cluster, dataproxy, executions, queue, run-scheduler, usage. ## Customer-Facing Change Log [selfhosted] External authorization now supports IdPs that do not provide custom JWT claims (e.g. Apple IdMS, Entra ID). **New configuration:** Add `global.USE_EXTERNAL_IDENTITY: "true"` to your controlplane values overlay when using External authorization (`services.authorizer.configMap.authorizer.type: "External"`) with an IdP that does not include the `identitytype` custom claim in JWTs. **No migration needed** for existing deployments. This setting only affects new selfhosted deployments enabling External authorization for the first time. Existing deployments with Okta (which provides the `identitytype` claim) are unaffected — the setting defaults to `"false"`. 
## Test Plan (required) - [x] New `controlplane.external-authz` test variant — all 9 service configmaps render `useExternalIdentity: 'true'` - [x] Existing `controlplane.aws` test — all configmaps render `useExternalIdentity: 'false'` - [x] `make generate-expected` — snapshots regenerated and pass - [x] Deployed to mike staging env — external authz server receives and authorizes requests ## Rollout Plan (required) Set `global.USE_EXTERNAL_IDENTITY: "true"` in values overlay. No behavior change unless set. For Union-managed selfhosted environments, Terraform derives this from `authorization_mode == "External"`. ## Rollback Plan (required) Remove global or set to `"false"`. Restores fail-fast behavior. ## Issue ref FAB-189 Companion to cloud [#15185](https://github.com/unionai/cloud/pull/15185). ## Checklist * [x] Added tests * [x] Ran manual integration tests * [x] Ran a deploy dry run * [ ] Added logging and metrics * [ ] Updated dashboards and alerts * [x] Updated documentation --- charts/controlplane/values.yaml | 25 +- .../controlplane.aws.billing-enable.yaml | 13 +- tests/generated/controlplane.aws.yaml | 13 +- .../controlplane.external-authz.yaml | 9812 +++++++++++++++++ tests/generated/controlplane.userclouds.yaml | 13 +- tests/values/controlplane.external-authz.yaml | 48 + 6 files changed, 9909 insertions(+), 15 deletions(-) create mode 100644 tests/generated/controlplane.external-authz.yaml create mode 100644 tests/values/controlplane.external-authz.yaml diff --git a/charts/controlplane/values.yaml b/charts/controlplane/values.yaml index 747ec036..60a96f57 100644 --- a/charts/controlplane/values.yaml +++ b/charts/controlplane/values.yaml @@ -253,6 +253,16 @@ configMap: # or External) — see services.authorizer.configMap below. authorizer: type: "Authorizer" + # When true, construct ExternalIdentity for subjects without an + # identitytype JWT claim. Required for External authorization with + # non-Okta IdPs that cannot add custom claims. 
(FAB-189) + # + # Ideally this would be derived from services.authorizer.configMap.authorizer.type, + # but the flyte-core subchart renders configs in its own template scope where + # .Values.services is not accessible. Set via global.USE_EXTERNAL_IDENTITY + # in your values overlay. A future release removing the flyte-core subchart + # dependency will allow direct derivation. + useExternalIdentity: '{{ default "false" .Values.global.USE_EXTERNAL_IDENTITY }}' authorizerClient: grpcConfig: host: 'dns:///authorizer.{{ .Release.Namespace }}.svc.cluster.local:80' @@ -1167,16 +1177,12 @@ flyte: endpoint: 'dns:///{{ .Values.global.UNION_HOST }}' insecure: false # flyteadmin routes authorization to the in-cluster authorizer service. - # For Union Cloud with UserClouds, override with: - # type: "UserClouds" - # userCloudsClient: - # tenantUrl: 'http://{{ .Release.Name }}-union-authz.{{ .Release.Namespace }}.svc.cluster.local:8080' - # tenantID: '623771e7-ddd6-4575-bedb-7c970ec75b87' - # clientID: "union-authz-client" - # clientSecretName: 'union/client_secret' - # enableLogging: true authorizer: type: "Authorizer" + # Set via global.USE_EXTERNAL_IDENTITY in your values overlay. + # Cannot cross-reference services.authorizer.configMap here because + # this renders inside the flyte subchart scope. (FAB-189) + useExternalIdentity: '{{ default "false" .Values.global.USE_EXTERNAL_IDENTITY }}' authorizerClient: grpcConfig: host: 'dns:///authorizer.{{ .Release.Namespace }}.svc.cluster.local:80' @@ -1228,9 +1234,10 @@ flyte: enabled: true urlPattern: '{{ printf "_SERVICE_.%s.svc.cluster.local:80" .Release.Namespace }}' # cacheservice routes authorization to the in-cluster authorizer service. - # For Union Cloud with UserClouds, override with the same pattern as flyteadmin above. authorizer: type: "Authorizer" + # Set via global.USE_EXTERNAL_IDENTITY in your values overlay. 
(FAB-189) + useExternalIdentity: '{{ default "false" .Values.global.USE_EXTERNAL_IDENTITY }}' authorizerClient: grpcConfig: host: 'dns:///authorizer.{{ .Release.Namespace }}.svc.cluster.local:80' diff --git a/tests/generated/controlplane.aws.billing-enable.yaml b/tests/generated/controlplane.aws.billing-enable.yaml index 692ee34e..48412416 100644 --- a/tests/generated/controlplane.aws.billing-enable.yaml +++ b/tests/generated/controlplane.aws.billing-enable.yaml @@ -428,6 +428,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cloudEvents: enable: false connection: @@ -556,6 +557,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache-server: grpcPort: 8089 grpcServerReflection: true @@ -618,6 +620,7 @@ data: - authorization - flyte-authorization type: Noop + useExternalIdentity: 'false' cache: identity: enabled: false @@ -674,6 +677,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -745,6 +749,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -804,6 +809,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -884,6 +890,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -949,6 +956,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -1014,6 +1022,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' billing: enable: 
true cache: @@ -5924,7 +5933,7 @@ spec: template: metadata: annotations: - configChecksum: "fc871cf99064ca6139c9fcc466301e09c5e563224da119eee7917619533d186" + configChecksum: "dc3dc4f3816ef7148d98e27dcc7defab7a44bd71f5b0f04b7ea159f820ee7a5" kubectl.kubernetes.io/default-container: flyteadmin labels: app.kubernetes.io/name: flyteadmin @@ -6303,7 +6312,7 @@ spec: template: metadata: annotations: - configChecksum: "834100fee446d8308f7d12fe24d7a68755250921e563db9c09155da8a5826e8" + configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" diff --git a/tests/generated/controlplane.aws.yaml b/tests/generated/controlplane.aws.yaml index a0f4fa5d..cc1a528f 100644 --- a/tests/generated/controlplane.aws.yaml +++ b/tests/generated/controlplane.aws.yaml @@ -428,6 +428,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cloudEvents: enable: false connection: @@ -556,6 +557,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache-server: grpcPort: 8089 grpcServerReflection: true @@ -618,6 +620,7 @@ data: - authorization - flyte-authorization type: Noop + useExternalIdentity: 'false' cache: identity: enabled: false @@ -674,6 +677,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -745,6 +749,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -804,6 +809,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -884,6 +890,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer 
+ useExternalIdentity: 'false' cache: identity: enabled: false @@ -949,6 +956,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -1014,6 +1022,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' billing: enable: false cache: @@ -5924,7 +5933,7 @@ spec: template: metadata: annotations: - configChecksum: "fc871cf99064ca6139c9fcc466301e09c5e563224da119eee7917619533d186" + configChecksum: "dc3dc4f3816ef7148d98e27dcc7defab7a44bd71f5b0f04b7ea159f820ee7a5" kubectl.kubernetes.io/default-container: flyteadmin labels: app.kubernetes.io/name: flyteadmin @@ -6303,7 +6312,7 @@ spec: template: metadata: annotations: - configChecksum: "834100fee446d8308f7d12fe24d7a68755250921e563db9c09155da8a5826e8" + configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" diff --git a/tests/generated/controlplane.external-authz.yaml b/tests/generated/controlplane.external-authz.yaml new file mode 100644 index 00000000..21337025 --- /dev/null +++ b/tests/generated/controlplane.external-authz.yaml @@ -0,0 +1,9812 @@ +--- +# Source: controlplane/templates/scylla/namespaces.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: scylla-operator +--- +# Source: controlplane/charts/scylla-operator/templates/operator.pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: scylla-operator + namespace: scylla-operator +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: scylla-operator + app.kubernetes.io/instance: scylla-operator +--- +# Source: controlplane/charts/scylla-operator/templates/webhookserver.pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: webhook-server + namespace: scylla-operator +spec: + minAvailable: 1 + selector: + 
matchLabels: + app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server +--- +# Source: controlplane/templates/console/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: unionconsole + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/flyte-core-pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: flyteadmin + namespace: union +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: flyteadmin +--- +# Source: controlplane/templates/flyte-core-pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: datacatalog + namespace: union +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: datacatalog +--- +# Source: controlplane/templates/flyte-core-pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cacheservice + namespace: union +spec: + minAvailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: cacheservice +--- +# Source: controlplane/templates/pdb.yaml +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: authorizer +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cluster +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: dataproxy +spec: + 
minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: executions +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: queue +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: run-scheduler +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: usage +spec: + minAvailable: "33%" + selector: + matchLabels: + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/charts/flyte/templates/admin/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flyteadmin + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/charts/scylla-operator/templates/operator.serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scylla-operator + namespace: scylla-operator + labels: + app.kubernetes.io/name: scylla-operator + app.kubernetes.io/instance: scylla-operator +--- +# Source: controlplane/charts/scylla-operator/templates/webhookserver.serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: scylla-operator + name: webhook-server + labels: + 
app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server +--- +# Source: controlplane/templates/cacheservice/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cacheservice + namespace: union + labels: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/console/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: unionconsole + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: authorizer + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dataproxy + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: executions + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: executions + app.kubernetes.io/instance: 
release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: queue + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: run-scheduler + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: usage + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +--- +# Source: controlplane/charts/flyte/templates/admin/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: flyte-admin-secrets + namespace: union +type: Opaque +stringData: +--- +# Source: controlplane/charts/flyte/templates/common/secret-auth.yaml +apiVersion: v1 +kind: Secret +metadata: + name: flyte-secret-auth + namespace: union +type: Opaque +stringData: + client_secret: foobar +--- +# Source: controlplane/charts/flyte/templates/admin/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: flyte-admin-clusters-config + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +data: + clusters.yaml: | + clusters: + clusterConfigs: [] + labelClusterMap: {} +--- +# Source: controlplane/charts/flyte/templates/admin/configmap.yaml +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: flyte-admin-base-config + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +data: + db.yaml: | + database: + connMaxLifeTime: 120s + dbname: flyteadmin + host: '' + maxIdleConnections: 10 + maxOpenConnections: 80 + passwordPath: /etc/db/pass.txt + port: 5432 + username: '' + domain.yaml: | + domains: + - id: development + name: development + - id: staging + name: staging + - id: production + name: production + logger.yaml: | + level: null + otel.yaml: | + otel: + file: + filename: /tmp/trace.txt + jaeger: + endpoint: http://localhost:14268/api/traces + otlpgrpc: + endpoint: http://localhost:4317 + otlphttp: + endpoint: http://localhost:4318/v1/traces + sampler: + parentSampler: always + type: noop + server.yaml: | + admin: + endpoint: dns:/// + insecure: false + auth: + appAuth: + thirdPartyConfig: + flyteClient: + clientId: flytectl + redirectUri: http://localhost:53593/callback + scopes: + - offline + - all + authorizedUris: + - https://localhost:30081 + - http://flyteadmin:80 + - http://flyteadmin.flyte.svc.cluster.local:80 + userAuth: + openId: + baseUrl: https://accounts.google.com + clientId: 657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com + scopes: + - profile + - openid + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cloudEvents: + enable: false + connection: + environment: staging + region: '' + rootTenantURLPattern: dns:/// + flyteadmin: + eventVersion: 2 + metadataStoragePrefix: + - metadata + - admin + metricsKeys: + - phase + metricsScope: 'flyte:' + profilerPort: 10254 + roleNameKey: iam.amazonaws.com/role + useOffloadedInputs: true + useOffloadedWorkflowClosure: true + otel: + type: noop 
+ private: + app: + cacheProviderConfig: + kind: bypass + populateUserFields: false + server: + grpc: + port: 8089 + httpPort: 8088 + security: + allowCors: true + allowedHeaders: + - Content-Type + - flyte-authorization + allowedOrigins: + - '*' + secure: false + useAuth: false + sharedService: + connectPort: 8089 + httpPort: 8088 + port: 8089 + selfServeConfig: + legacyHosts: + - '' + union: + internalConnectionConfig: + enabled: true + urlPattern: '_SERVICE_.union.svc.cluster.local:80' + remoteData.yaml: | + remoteData: + region: us-east-1 + scheme: local + signedUrls: + durationMinutes: 3 + storage.yaml: | + storage: + type: s3 + container: "" + connection: + auth-type: iam + region: + enable-multicontainer: false + limits: + maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 + task_resource_defaults.yaml: | + task_resources: + defaults: + cpu: 100m + memory: 500Mi + limits: + cpu: 2 + gpu: 1 + memory: 1Gi +--- +# Source: controlplane/charts/flyte/templates/console/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: flyte-console-config + namespace: union + labels: + app.kubernetes.io/name: flyteconsole + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + app.kubernetes.io/managed-by: Helm +data: + BASE_URL: /console + CONFIG_DIR: /etc/flyte/config +--- +# Source: controlplane/templates/cacheservice/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cacheservice-config + namespace: union + labels: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/managed-by: Helm +data: + db.yaml: | + database: + connMaxLifeTime: 120s + dbname: cacheservice + host: '' + maxIdleConnections: 10 + maxOpenConnections: 20 + passwordPath: /etc/db/pass.txt + port: 5432 + username: '' + logger.yaml: | + level: null + server.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - 
flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache-server: + grpcPort: 8089 + grpcServerReflection: true + httpPort: 8080 + cacheservice: + heartbeat-grace-period-multiplier: 3 + max-reservation-heartbeat: 30s + metrics-scope: flyte + profiler-port: 10254 + storage-prefix: cached_outputs + otel: + type: noop + private: + app: + cacheProviderConfig: + kind: bypass + union: + internalConnectionConfig: + enabled: true + urlPattern: '_SERVICE_.union.svc.cluster.local:80' + storage.yaml: | + storage: + type: s3 + container: "" + connection: + auth-type: iam + region: + enable-multicontainer: false + limits: + maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: authorizer + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + externalClient: + failOpen: false + forwardHeaders: + - authorization + - flyte-authorization + grpcConfig: + host: dns:///my-authz-server.default.svc.cluster.local:50051 + insecure: true + type: External + useExternalIdentity: 'true' + cache: + identity: + enabled: false + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + logger: + level: 6 + otel: + type: noop + sharedService: + connectPort: 8081 + metrics: + scope: 'authorizer:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + 
clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache: + identity: + enabled: false + cloudProvider: + provider: Mock + cluster: + cloudflare: + active: false + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + db: + connectionPool: + maxConnectionLifetime: 1m + maxIdleConnections: 20 + maxOpenConnections: 20 + dbname: '' + host: '' + passwordPath: /etc/db/pass.txt + port: 5432 + username: '' + logger: + level: 6 + otel: + type: noop + sharedService: + connectPort: 8081 + metrics: + scope: 'cluster:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dataproxy + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: dataproxy + 
app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache: + identity: + enabled: false + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + dataproxy: + clusterSelector: + type: local + secureTunnelTenantURLPattern: http://ingress-nginx-internal.ingress-nginx.svc.cluster.local:80 + logger: + level: 6 + otel: + type: noop + sharedService: + metrics: + scope: 'dataproxy:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: executions + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache: + identity: + enabled: false + cloudEventsProcessor: + cloudProvider: Local + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + db: + connectionPool: + 
maxConnectionLifetime: 1m + maxIdleConnections: 20 + maxOpenConnections: 20 + dbname: '' + host: '' + passwordPath: /etc/db/pass.txt + port: 5432 + username: '' + eventsProxy: + recorderType: RunService + executions: + apps: + enrichIdentities: false + publicURLPattern: https://%s.apps. + llm: + enabled: false + task: + enabled: true + enrichIdentities: false + logger: + level: 6 + otel: + type: noop + sharedService: + metrics: + scope: 'executions:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 + workspace: + enable: false +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: queue + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache: + identity: + enabled: false + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + logger: + level: 6 + otel: + type: noop + queue: + db: + hosts: + - 'scylla-client.union.svc.cluster.local' + threadCount: 64 + type: cql + eventer: + recordActionThreadCount: 16 + type: runservice + updateActionStatusThreadCount: 16 + sharedService: + metrics: + scope: 'queue:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: 
flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: run-scheduler + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + cache: + identity: + enabled: false + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + db: + connectionPool: + maxConnectionLifetime: 1m + maxIdleConnections: 20 + maxOpenConnections: 20 + dbname: '' + host: '' + passwordPath: /etc/db/pass.txt + port: 5432 + username: '' + logger: + level: 6 + otel: + type: noop + sharedService: + metrics: + scope: 'run-scheduler:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 +--- +# Source: controlplane/templates/configmap.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: usage + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + 
app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + authorizer: + authorizerClient: + forwardHeaders: + - authorization + - flyte-authorization + - x-user-token + grpcConfig: + host: dns:///authorizer.union.svc.cluster.local:80 + insecure: true + type: Authorizer + useExternalIdentity: 'true' + billing: + enable: false + cache: + identity: + enabled: false + cloudProvider: + provider: Mock + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain + logger: + level: 6 + otel: + type: noop + sharedService: + connectPort: 8081 + metrics: + scope: 'usage:' + selfServeConfig: + legacyHosts: + - '' + union: + auth: + authorizationMetadataKey: flyte-authorization + clientId: 'test-internal-client-id' + clientSecretLocation: /etc/secrets/union/client_secret + enable: true + scopes: + - all + tokenUrl: 'https://test.example.com/oauth2/v1/token' + type: ClientSecret + internalConnectionConfig: + enabled: true + urlPattern: _SERVICE_.union.svc.cluster.local:80 + usage: + taskMetrics: + agentQuery: + mappings: + dgx_job: + queries: + EXECUTION_METRIC_ALLOCATED_CPU_AVG: CPU_ALLOCATION:MEAN + EXECUTION_METRIC_ALLOCATED_MEMORY_BYTES_AVG: MEM_ALLOCATION:MEAN + EXECUTION_METRIC_CPU_UTILIZATION: CPU_UTILIZATION:MEAN + EXECUTION_METRIC_GPU_UTILIZATION: GPU_UTILIZATION:MEAN + EXECUTION_METRIC_MEMORY_UTILIZATION: MEM_UTILIZATION:MEAN + metricDelayToleranceDuration: 0s + promQuery: + queries: + EXECUTION_METRIC_ALLOCATED_CPU_AVG: | + max by (namespace, pod) ( + ( + sum by (namespace, pod) (irate(container_cpu_usage_seconds_total{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""}[5m])) > + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="cpu"}) + ) + or + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="cpu"}) + ) * + on (namespace, pod) group_left 
max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)
+              EXECUTION_METRIC_ALLOCATED_MEMORY_BYTES_AVG: |
+                max by (namespace, pod) (
+                  (
+                    sum by (namespace, pod) (container_memory_working_set_bytes{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""}) >
+                    sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="memory"})
+                  )
+                  or
+                  sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="memory"})
+                ) *
+                on (namespace, pod) group_left max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)
+              EXECUTION_METRIC_APP_REPLICA_COUNT: |
+                sum (kube_pod_status_phase{phase=~"Running|Pending", namespace="{{.Namespace}}", pod=~"{{.AppName}}.*"} == 1) or vector(0)
+              EXECUTION_METRIC_APP_REQUESTS: |
+                sum(rate((
+                  envoy_cluster_upstream_rq_xx{
+                    job="serving-envoy",
+                    project=~"{{.Project}}",
+                    domain=~"{{.Domain}}",
+                    name=~"{{.AppName}}",
+                    name!=""}
+                )[5m:])) by (project, domain, name, envoy_response_code_class)
+              EXECUTION_METRIC_APP_RESPONSE_TIME_P50: |
+                histogram_quantile(0.5, sum(rate((
+                  envoy_cluster_upstream_rq_time_bucket{
+                    job="serving-envoy",
+                    project=~"{{.Project}}",
+                    domain=~"{{.Domain}}",
+                    name=~"{{.AppName}}",
+                    name!=""}
+                )[5m:])) by (project, domain, name, le))
+              EXECUTION_METRIC_APP_RESPONSE_TIME_P90: |
+                histogram_quantile(0.90, sum(rate((
+                  envoy_cluster_upstream_rq_time_bucket{
+                    job="serving-envoy",
+                    project=~"{{.Project}}",
+                    domain=~"{{.Domain}}",
+                    name=~"{{.AppName}}",
+                    name!=""}
+                )[5m:])) by (project, domain, name, le))
+              EXECUTION_METRIC_APP_RESPONSE_TIME_P95: |
+                histogram_quantile(0.95, sum(rate((
+                  envoy_cluster_upstream_rq_time_bucket{
+                    job="serving-envoy",
+                    project=~"{{.Project}}",
+                    domain=~"{{.Domain}}",
+                    name=~"{{.AppName}}",
+                    name!=""}
+                )[5m:])) by 
(project, domain, name, le)) + EXECUTION_METRIC_CPU_UTILIZATION: | + (sum by (namespace, pod) (irate(container_cpu_usage_seconds_total{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""}[5m])) / + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="cpu"})) * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1) + EXECUTION_METRIC_GPU_FRAME_BUFFER_UTILIZATION: | + (sum by (namespace, pod, gpu) (DCGM_FI_DEV_FB_USED{namespace="{{.Namespace}}",pod=~"{{.PodName}}"}) / + sum by (namespace, pod, gpu) (DCGM_FI_DEV_FB_USED{namespace="{{.Namespace}}",pod=~"{{.PodName}}"} + DCGM_FI_DEV_FB_FREE{namespace="{{.Namespace}}",pod=~"{{.PodName}}"})) * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1) + EXECUTION_METRIC_GPU_MEMORY_UTILIZATION: | + sum by (gpu) (DCGM_FI_DEV_MEM_COPY_UTIL{namespace="{{.Namespace}}",pod=~"{{.PodName}}"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) / 100.0 + EXECUTION_METRIC_GPU_SM_ACTIVE: | + sum by (gpu) (DCGM_FI_PROF_SM_ACTIVE{namespace="{{.Namespace}}",pod=~"{{.PodName}}"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_GPU_SM_OCCUPANCY: | + sum by (gpu) (DCGM_FI_PROF_SM_OCCUPANCY{namespace="{{.Namespace}}",pod=~"{{.PodName}}"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_GPU_UTILIZATION: | + sum by (gpu) (DCGM_FI_DEV_GPU_UTIL{namespace="{{.Namespace}}",pod=~"{{.PodName}}"} * + on 
(namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) / 100.0 + EXECUTION_METRIC_LIMIT_CPU: | + sum by (namespace, pod) (kube_pod_container_resource_limits{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="cpu"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_LIMIT_MEMORY_BYTES: | + sum by (namespace, pod) (kube_pod_container_resource_limits{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="memory"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_MEMORY_UTILIZATION: | + (sum by (namespace, pod) (container_memory_working_set_bytes{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""}) / + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="memory"})) * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1) + EXECUTION_METRIC_REQUEST_CPU: | + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="cpu"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_REQUEST_MEMORY_BYTES: | + sum by (namespace, pod) (kube_pod_container_resource_requests{namespace="{{.Namespace}}",pod=~"{{.PodName}}",resource="memory"} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_USED_CPU_AVG: | + sum by (namespace, 
pod) (irate(container_cpu_usage_seconds_total{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""}[5m]) * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + EXECUTION_METRIC_USED_MEMORY_BYTES_AVG: | + sum by (namespace, pod) (container_memory_working_set_bytes{namespace="{{.Namespace}}",pod=~"{{.PodName}}",image!=""} * + on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{namespace="{{.Namespace}}",pod=~"{{.PodName}}",phase=~"Pending|Running"} == 1)) + workers: 10 +--- +# Source: controlplane/templates/monitoring/dashboard-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: release-name-dashboard-union-controlplane-overview + namespace: union + labels: + grafana_dashboard: "1" + app.kubernetes.io/managed-by: Helm +data: + union-controlplane-overview.json: |- + { + "annotations": { + "list": [] + }, + "description": "Union Controlplane health and service metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "title": "Health", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "orange", + "value": 0.5 + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "percentunit", + "min": 0, + "max": 1 + } + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 2, + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "auto" + }, + "title": "Service Availability", + "type": "stat", + "targets": [ + { + "expr": 
"avg(kube_deployment_status_replicas_available{namespace=\"$namespace\"} / kube_deployment_spec_replicas{namespace=\"$namespace\"})", + "legendFormat": "Availability", + "refId": "A" + } + ], + "description": "Percentage of deployments with all requested replicas available. 1.0 = all healthy." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 3 + }, + { + "color": "red", + "value": 10 + } + ] + }, + "unit": "short" + } + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 3, + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "auto" + }, + "title": "Pod Restarts (1h)", + "type": "stat", + "targets": [ + { + "expr": "sum(increase(kube_pod_container_status_restarts_total{namespace=\"$namespace\"}[1h]))", + "legendFormat": "Restarts", + "refId": "A" + } + ], + "description": "Total container restarts in the last hour. Non-zero indicates crashlooping or OOM kills." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 0.01 + }, + { + "color": "red", + "value": 0.05 + } + ] + }, + "unit": "percentunit", + "min": 0, + "max": 1 + } + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 7, + "options": { + "colorMode": "background", + "graphMode": "area", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "auto" + }, + "title": "Connect Error Rate", + "type": "stat", + "targets": [ + { + "expr": "sum(rate(connect:server_requests_handled_total{namespace=\"$namespace\", code!~\"0|OK|Canceled|NotFound\"}[$__rate_interval])) / sum(rate(connect:server_requests_handled_total{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "Error %", + "refId": "A" + } + ], + "description": "Fraction of Connect RPC responses with non-OK/non-Canceled codes across all CP services." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "none" + } + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "title": "Connect Request Rate by Service", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (service) (rate(connect:server_requests_handled_total{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ service }}", + "refId": "A" + } + ], + "description": "Connect protocol request throughput broken down by service (e.g. ExecutionService, ClusterService)." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 5, + "options": { + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "title": "Connect Errors by Code", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (code) (rate(connect:server_requests_handled_total{namespace=\"$namespace\", code!~\"0|OK|Canceled|NotFound\"}[$__rate_interval]))", + "legendFormat": "{{ code }}", + "refId": "A" + } + ], + "description": "Connect error responses by gRPC status code (Internal, Unavailable, etc.)." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "short" + } + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 11, + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "value_and_name" + }, + "title": "Handler Panics", + "type": "stat", + "targets": [ + { + "expr": "sum(authorizer:handler_panic{namespace=\"$namespace\"} + cluster:handler_panic{namespace=\"$namespace\"} + dataproxy:handler_panic{namespace=\"$namespace\"} + executions:handler_panic{namespace=\"$namespace\"} + queue:handler_panic{namespace=\"$namespace\"} + usage:handler_panic{namespace=\"$namespace\"})", + "legendFormat": "Total", + "refId": "A" + } + ], + "description": "Total handler panics across all CP services. Any non-zero value indicates a service caught a panic during request handling." 
+ }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 1200, + "title": "SLOs", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "orange", + "value": 0.99 + }, + { + "color": "green", + "value": 0.999 + } + ] + }, + "unit": "percentunit", + "decimals": 3 + } + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 10 + }, + "id": 1201, + "title": "Service Availability", + "type": "stat", + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "value" + }, + "targets": [ + { + "expr": "avg(kube_deployment_status_replicas_available{namespace=\"$namespace\"} / kube_deployment_spec_replicas{namespace=\"$namespace\"})", + "refId": "A" + } + ], + "description": "Current service availability across all deployments." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": -999 + }, + { + "color": "orange", + "value": 0 + }, + { + "color": "green", + "value": 0.5 + } + ] + }, + "unit": "percentunit", + "decimals": 1, + "noValue": "N/A" + } + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 10 + }, + "id": 1202, + "title": "Error Budget Remaining", + "type": "stat", + "options": { + "colorMode": "background", + "graphMode": "area", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "value" + }, + "targets": [ + { + "expr": "union:cp:slo:error_budget_remaining", + "refId": "A" + } + ], + "description": "Fraction of error budget remaining. <0 = budget exhausted. 
Requires monitoring.slos.enabled." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "orange", + "value": 0.95 + }, + { + "color": "green", + "value": 0.999 + } + ] + }, + "unit": "percentunit", + "decimals": 2, + "noValue": "N/A" + } + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 10 + }, + "id": 1203, + "title": "Ingress Success Rate", + "type": "stat", + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "value" + }, + "targets": [ + { + "expr": "union:cp:slo:ingress_success_rate or (1 - sum(rate(nginx_ingress_controller_request_duration_seconds_count{namespace=\"$namespace\", status=~\"5..\"}[5m])) / sum(rate(nginx_ingress_controller_request_duration_seconds_count{namespace=\"$namespace\"}[5m])))", + "refId": "A" + } + ], + "description": "Ingress success rate (non-5xx). Customer-facing SLO metric. Falls back to raw metric if SLO recording rules are not enabled." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 5 + } + ] + }, + "unit": "s", + "decimals": 2 + } + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 10 + }, + "id": 1204, + "title": "Ingress Latency p99", + "type": "stat", + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + }, + "textMode": "value" + }, + "targets": [ + { + "expr": "union:cp:slo:ingress_latency_p99 or histogram_quantile(0.99, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{namespace=\"$namespace\"}[5m])))", + "refId": "A" + } + ], + "description": "Ingress p99 latency. Falls back to raw metric if SLO recording rules are not enabled." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "percentunit" + } + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 1205, + "title": "Availability Over Time", + "type": "timeseries", + "targets": [ + { + "expr": "avg(kube_deployment_status_replicas_available{namespace=\"$namespace\"} / kube_deployment_spec_replicas{namespace=\"$namespace\"})", + "legendFormat": "Availability", + "refId": "A" + }, + { + "expr": "vector(0.999)", + "legendFormat": "Target (99.9%)", + "refId": "B" + } + ], + "description": "Service availability over time with SLO target line." 
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never"
+ },
+ "unit": "percentunit",
+ "max": 1,
+ "min": -0.5
+ }
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 16
+ },
+ "id": 1206,
+ "title": "Error Budget Remaining",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "union:cp:slo:error_budget_remaining",
+ "legendFormat": "Budget remaining",
+ "refId": "A"
+ },
+ {
+ "expr": "vector(0)",
+ "legendFormat": "Exhausted",
+ "refId": "B"
+ }
+ ],
+ "description": "Error budget remaining over time. Requires monitoring.slos.enabled."
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 31
+ },
+ "id": 100,
+ "title": "Ingress (nginx)",
+ "type": "row",
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never",
+ "stacking": {
+ "mode": "normal"
+ }
+ },
+ "unit": "reqps"
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 101,
+ "title": "Request Rate by Path",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "sum by (host, path) (rate(nginx_ingress_controller_request_duration_seconds_count{namespace=\"$namespace\"}[$__rate_interval]))",
+ "legendFormat": "{{ host }}{{ path }}",
+ "refId": "A"
+ }
+ ],
+ "description": "Ingress request rate broken down by host and URL path."
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 102, + "title": "Error Rate by Status Code", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (status) (rate(nginx_ingress_controller_request_duration_seconds_count{namespace=\"$namespace\", status=~\"[45]..\"}[$__rate_interval]))", + "legendFormat": "{{ status }}", + "refId": "A" + } + ], + "description": "4xx and 5xx error rates from ingress-nginx by HTTP status code." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 103, + "title": "Latency p50 / p95 / p99", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.50, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p99", + "refId": "C" + } + ], + "description": "Ingress request latency percentiles. Includes TLS + routing + upstream response time." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 104, + "title": "Active Connections", + "type": "timeseries", + "targets": [ + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{namespace=\"$namespace\"})", + "legendFormat": "Active", + "refId": "A" + } + ], + "description": "Current number of active client connections to ingress-nginx." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 200, + "title": "Connect / gRPC", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 201, + "title": "Connect Request Rate by Service", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (service) (rate(connect:server_requests_handled_total{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ service }}", + "refId": "A" + } + ], + "description": "Connect protocol request throughput broken down by service (e.g. ExecutionService, ClusterService)." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 202, + "title": "Connect Errors by Service & Code", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (service, code) (rate(connect:server_requests_handled_total{namespace=\"$namespace\", code!~\"0|OK|Canceled|NotFound\"}[$__rate_interval]))", + "legendFormat": "{{ service }} {{ code }}", + "refId": "A" + } + ], + "description": "Connect errors broken down by service and gRPC code. Identifies which services are erroring." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 19 + }, + "id": 203, + "title": "gRPC Server Request Rate (CacheService)", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (grpc_service, grpc_method) (rate(grpc_server_handled_total{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ grpc_service }}/{{ grpc_method }}", + "refId": "A" + } + ], + "description": "CacheService is the only CP service using gRPC (not Connect). Shows Get/Put/Delete/Reservation rates." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "reqps" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 19 + }, + "id": 204, + "title": "gRPC Server Errors (CacheService)", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (grpc_method, grpc_code) (rate(grpc_server_handled_total{namespace=\"$namespace\", grpc_code!=\"OK\"}[$__rate_interval]))", + "legendFormat": "{{ grpc_method }} {{ grpc_code }}", + "refId": "A" + } + ], + "description": "CacheService gRPC errors by method and code." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 300, + "title": "FlyteAdmin (V1 + V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 12 + }, + "id": 301, + "title": "Active Executions", + "type": "timeseries", + "targets": [ + { + "expr": "flyte:admin:execution_manager:active_executions{namespace=\"$namespace\"}", + "legendFormat": "Workflows", + "refId": "A" + }, + { + "expr": "flyte:admin:node_execution_manager:active_node_executions{namespace=\"$namespace\"}", + "legendFormat": "Nodes", + "refId": "B" + }, + { + "expr": "flyte:admin:task_execution_manager:active_executions{namespace=\"$namespace\"}", + "legendFormat": "Tasks", + "refId": "C" + } + ], + "description": "Current count of active workflow, node, and task executions tracked by FlyteAdmin." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 12 + }, + "id": 302, + "title": "Execution Create / Event Rate", + "type": "timeseries", + "targets": [ + { + "expr": "rate(flyte:admin:execution_manager:executions_created{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Executions created", + "refId": "A" + }, + { + "expr": "rate(flyte:admin:execution_manager:execution_events_created{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Workflow events", + "refId": "B" + }, + { + "expr": "rate(flyte:admin:node_execution_manager:node_execution_events_created{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Node events", + "refId": "C" + }, + { + "expr": "rate(flyte:admin:task_execution_manager:task_execution_events_created{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Task events", + "refId": "D" + } + ], + "description": "Rate of execution creations and event ingestion (workflow, node, task events from propeller)." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 12 + }, + "id": 303, + "title": "Errors", + "type": "timeseries", + "targets": [ + { + "expr": "rate(flyte:admin:execution_manager:propeller_failures{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Propeller failures", + "refId": "A" + }, + { + "expr": "rate(flyte:admin:execution_manager:transformer_error{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Transformer errors", + "refId": "B" + }, + { + "expr": "rate(flyte:admin:execution_manager:publish_error{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Publish errors", + "refId": "C" + }, + { + "expr": "rate(flyte:admin:execution_manager:execution_termination_failure{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Termination failures", + "refId": "D" + } + ], + "description": "FlyteAdmin error rates: propeller communication failures, model transform errors, notification publish failures." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 304, + "title": "Endpoint Latency (p95)", + "type": "timeseries", + "targets": [ + { + "expr": "flyte:admin:create_execution:duration_ms{namespace=\"$namespace\", quantile=\"0.95\"} / 1000", + "legendFormat": "CreateExecution", + "refId": "A" + }, + { + "expr": "flyte:admin:create_execution_event:duration_ms{namespace=\"$namespace\", quantile=\"0.95\"} / 1000", + "legendFormat": "CreateExecutionEvent", + "refId": "B" + }, + { + "expr": "flyte:admin:get_execution:duration_ms{namespace=\"$namespace\", quantile=\"0.95\"} / 1000", + "legendFormat": "GetExecution", + "refId": "C" + }, + { + "expr": "flyte:admin:list_execution:duration_ms{namespace=\"$namespace\", quantile=\"0.95\"} / 1000", + "legendFormat": "ListExecution", + "refId": "D" + } + ], + "description": "FlyteAdmin gRPC endpoint latency at p95. Key endpoints: CreateExecution, CreateExecutionEvent, GetExecution." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 305, + "title": "Auth Middleware Decisions", + "type": "timeseries", + "targets": [ + { + "expr": "rate(flyte:middleware:authorization:authz_approved{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Approved", + "refId": "A" + }, + { + "expr": "rate(flyte:middleware:authorization:authz_denied{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Denied", + "refId": "B" + } + ], + "description": "Authorization approve/deny rate from the FlyteAdmin auth middleware. High deny rate may indicate auth misconfiguration." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 400, + "title": "Executions (V1 + V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 12 + }, + "id": 401, + "title": "Execution Create / Ack Rate", + "type": "timeseries", + "targets": [ + { + "expr": "rate(executions:executions:handle_create_op_count{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Create", + "refId": "A" + }, + { + "expr": "rate(executions:executions:handle_ack_op_count{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Ack", + "refId": "B" + } + ], + "description": "Rate of execution operation creates and acknowledgements. Create = new execution request, Ack = DP confirmed receipt." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 12 + }, + "id": 402, + "title": "Execution Create / Ack Latency (p95)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(executions:executions:handle_create_op_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "Create p95", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(executions:executions:handle_ack_op_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "Ack p95", + "refId": "B" + } + ], + "description": "Time to prepare create/ack execution requests at p95." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 12 + }, + "id": 403, + "title": "Assignment Duration (p50 / p90)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.50, sum by (le) (rate(executions:workqueue:announce_cluster_assignment_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.90, sum by (le) (rate(executions:workqueue:announce_cluster_assignment_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p90", + "refId": "B" + } + ], + "description": "Key SLI: end-to-end time from execution create to cluster assignment. Custom buckets from 10ms to 20min." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 20 + }, + "id": 404, + "title": "Workqueue Operations", + "type": "timeseries", + "targets": [ + { + "expr": "rate(executions:workqueue:send_operation_count{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Send ops", + "refId": "A" + }, + { + "expr": "rate(executions:workqueue:claim_operations{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Claims", + "refId": "B" + }, + { + "expr": "rate(executions:workqueue:send_operation_failures{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Send failures", + "refId": "C" + }, + { + "expr": "rate(executions:workqueue:claim_operation_failures{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Claim failures", + "refId": "D" + } + ], + "description": "Execution operation send/claim rates and failures. Send = dispatch to DP, Claim = pick up from DB." 
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never"
+ },
+ "unit": "ops"
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 20
+ },
+ "id": 405,
+ "title": "DB Operation Rate",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "sum by (op) (rate(label_replace({__name__=~\"executions:database:postgres:repositories:execution_ops:.*_count\", namespace=\"$namespace\"}, \"op\", \"$1\", \"__name__\", \"executions:database:postgres:repositories:execution_ops:(.*)_count\")[$__rate_interval:]))",
+ "legendFormat": "{{ op }}",
+ "refId": "A"
+ }
+ ],
+ "description": "Execution operations DB rate by operation type: create, ack, claim, unclaim, get, update."
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never"
+ },
+ "unit": "short"
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 20
+ },
+ "id": 406,
+ "title": "DB Errors",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "rate(executions:database:postgres:errors:gorm_error{namespace=\"$namespace\"}[$__rate_interval])",
+ "legendFormat": "gorm_error",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(executions:database:postgres:errors:postgres_error{namespace=\"$namespace\"}[$__rate_interval])",
+ "legendFormat": "postgres_error",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(executions:database:postgres:errors:not_found{namespace=\"$namespace\"}[$__rate_interval])",
+ "legendFormat": "not_found",
+ "refId": "C"
+ }
+ ],
+ "description": "Executions service Postgres error rates by type: gorm errors, native postgres errors, not-found."
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 28 + }, + "id": 407, + "title": "Cluster Cache Hit/Miss", + "type": "timeseries", + "targets": [ + { + "expr": "rate(executions:executions:list_clusters:hits{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Cluster hits", + "refId": "A" + }, + { + "expr": "rate(executions:executions:list_clusters:miss{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Cluster miss", + "refId": "B" + }, + { + "expr": "rate(executions:executions:list_nodepools:hits{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Nodepool hits", + "refId": "C" + }, + { + "expr": "rate(executions:executions:list_nodepools:miss{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Nodepool miss", + "refId": "D" + } + ], + "description": "Cluster and nodepool list cache effectiveness. High miss rate = excessive DB queries." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 28 + }, + "id": 408, + "title": "Pending Assignments", + "type": "timeseries", + "targets": [ + { + "expr": "executions:app:leaser:pending_assignment_unlabeled{namespace=\"$namespace\"}", + "legendFormat": "Pending", + "refId": "A" + } + ], + "description": "Number of apps waiting for cluster assignment. Growing backlog = scheduling bottleneck." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 36 + }, + "id": 409, + "title": "First Ack Latency (V2 SLI)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.50, sum by (le) (rate(executions:app:service:first_ack_latency_unlabeled_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(executions:app:service:first_ack_latency_unlabeled_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum by (le) (rate(executions:app:service:first_ack_latency_unlabeled_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p99", + "refId": "C" + } + ], + "description": "Key V2 SLI: time to deliver an app to the dataplane. Measures end-to-end scheduling latency." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 36 + }, + "id": 410, + "title": "V2 Run Dispatch", + "type": "timeseries", + "targets": [ + { + "expr": "rate(executions:run:runs_sent{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Runs sent", + "refId": "A" + }, + { + "expr": "rate(executions:run:actions_sent{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Actions sent", + "refId": "B" + }, + { + "expr": "rate(executions:run:enqueue_action_failures{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Enqueue failures", + "refId": "C" + } + ], + "description": "V2 run/action dispatch throughput. Enqueue failures indicate queue service issues." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 36 + }, + "id": 411, + "title": "V2 Run Notifier", + "type": "timeseries", + "targets": [ + { + "expr": "rate(executions:run_notifier:notifications_sent{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Notifications/s", + "refId": "A" + }, + { + "expr": "executions:run_notifier:subscribers{namespace=\"$namespace\"}", + "legendFormat": "Subscribers", + "refId": "B" + }, + { + "expr": "rate(executions:run:logs:tail_logs_bytes_read{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Log bytes/s", + "refId": "C" + } + ], + "description": "V2 notification pipeline: notifications sent per second, active subscribers, log 
bytes streamed." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 500, + "title": "Queue / Run-Scheduler (V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 13 + }, + "id": 501, + "title": "Metadata Store Counts", + "type": "timeseries", + "targets": [ + { + "expr": "queue:metadata_store:total_run_count{namespace=\"$namespace\"}", + "legendFormat": "Total runs", + "refId": "A" + }, + { + "expr": "queue:metadata_store:total_action_count{namespace=\"$namespace\"}", + "legendFormat": "Total actions", + "refId": "B" + }, + { + "expr": "queue:metadata_store:scheduled_run_count{namespace=\"$namespace\"}", + "legendFormat": "Scheduled runs", + "refId": "C" + }, + { + "expr": "queue:metadata_store:scheduled_action_count{namespace=\"$namespace\"}", + "legendFormat": "Scheduled actions", + "refId": "D" + } + ], + "description": "Total and scheduled run/action counts in the queue. Shows system load." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 13 + }, + "id": 502, + "title": "Scheduler / Runner / Aborter Throughput", + "type": "timeseries", + "targets": [ + { + "expr": "rate(queue:scheduler:enqueued_leases{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Enqueued", + "refId": "A" + }, + { + "expr": "rate(queue:runner:completed_leases{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Completed", + "refId": "B" + }, + { + "expr": "rate(queue:aborter:aborted_leases{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Aborted", + "refId": "C" + } + ], + "description": "Lease lifecycle throughput: enqueued (new), completed (done), aborted (cancelled)." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 13 + }, + "id": 503, + "title": "Queue Lengths", + "type": "timeseries", + "targets": [ + { + "expr": "queue:scheduler:input_queue_length{namespace=\"$namespace\"}", + "legendFormat": "Scheduler input", + "refId": "A" + }, + { + "expr": "queue:runner:input_queue_length{namespace=\"$namespace\"}", + "legendFormat": "Runner input", + "refId": "B" + }, + { + "expr": "queue:aborter:input_queue_length{namespace=\"$namespace\"}", + "legendFormat": "Aborter input", + "refId": "C" + }, + { + "expr": "queue:dispatcher:chain_queue_length{namespace=\"$namespace\"}", + "legendFormat": "Dispatcher chain", + "refId": "D" + }, + { + "expr": "queue:db:queue_length{namespace=\"$namespace\"}", + "legendFormat": "DB queue", + "refId": "E" + } + ], + "description": "Internal queue depths across scheduler, runner, aborter, dispatcher, and DB worker pool. Growing = backpressure." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 21 + }, + "id": 504, + "title": "Dispatcher Operation Duration (p99)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.99, sum by (type, le) (rate(queue:dispatcher:operation_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "{{ type }}", + "refId": "A" + } + ], + "description": "Dispatcher multi-step operation chain execution time at p99, by operation type." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 21 + }, + "id": 505, + "title": "State Get/Put Duration (p99)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.99, sum by (le) (rate(queue:state:get_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "Get p99", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.99, sum by (le) (rate(queue:state:put_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "Put p99", + "refId": "B" + } + ], + "description": "In-memory state store operation latency. Backed by ScyllaDB persistence." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 21 + }, + "id": 506, + "title": "State Cache & Eventer", + "type": "timeseries", + "targets": [ + { + "expr": "queue:state:active_states{namespace=\"$namespace\"}", + "legendFormat": "Active states", + "refId": "A" + }, + { + "expr": "queue:state:terminal_states{namespace=\"$namespace\"}", + "legendFormat": "Terminal states", + "refId": "B" + }, + { + "expr": "rate(queue:eventer:record_action_errors{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Eventer errors", + "refId": "C" + } + ], + "description": "Active/terminal state counts and eventer error rate. Eventer reports action status to executions service." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 29 + }, + "id": 507, + "title": "Worker Capacity", + "type": "timeseries", + "targets": [ + { + "expr": "queue:scheduler:worker_capacity{namespace=\"$namespace\"}", + "legendFormat": "{{ worker_name }}", + "refId": "A" + } + ], + "description": "Remaining execution capacity per connected DP worker. Zero = worker saturated." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 29 + }, + "id": 508, + "title": "Dispatcher Failures by Type", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (type) (rate(queue:dispatcher:operation_failures{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ type }}", + "refId": "A" + } + ], + "description": "Failed dispatcher operations by Go type. Indicates internal queue service errors." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 29 + }, + "id": 509, + "title": "DB & Client Thread Pool", + "type": "timeseries", + "targets": [ + { + "expr": "queue:db:free_threads{namespace=\"$namespace\"}", + "legendFormat": "DB free threads", + "refId": "A" + }, + { + "expr": "queue:queue_client:free_threads{namespace=\"$namespace\"}", + "legendFormat": "Queue client free", + "refId": "B" + }, + { + "expr": "queue:state_client:free_threads{namespace=\"$namespace\"}", + "legendFormat": "State client free", + "refId": "C" + } + ], + "description": "Idle worker goroutines in DB, queue-client, and state-client pools. Zero = all threads busy." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 600, + "title": "Cluster Service (V1 + V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 14 + }, + "id": 601, + "title": "UpdateStatus / Heartbeat Rate", + "type": "timeseries", + "targets": [ + { + "expr": "rate(cluster:svc:update_status:updates_total{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "UpdateStatus", + "refId": "A" + }, + { + "expr": "rate(cluster:svc:heartbeat:success_ms_count{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Heartbeat", + "refId": "B" + } + ], + "description": "Rate of DP cluster status updates and heartbeats received by the cluster service." 
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never"
+ },
+ "unit": "s"
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 14
+ },
+ "id": 602,
+ "title": "Cluster API Latency (p95)",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum by (le) (rate(cluster:svc:update_status:success_ms_bucket{namespace=\"$namespace\"}[$__rate_interval]))) / 1000",
+ "legendFormat": "UpdateStatus p95",
+ "refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum by (le) (rate(cluster:svc:heartbeat:success_ms_bucket{namespace=\"$namespace\"}[$__rate_interval]))) / 1000",
+ "legendFormat": "Heartbeat p95",
+ "refId": "B"
+ }
+ ],
+ "description": "Cluster service RPC latency for UpdateStatus and Heartbeat calls."
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "showPoints": "never"
+ },
+ "unit": "short"
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 14
+ },
+ "id": 603,
+ "title": "Operator / Propeller Restarts (from DP)",
+ "type": "timeseries",
+ "targets": [
+ {
+ "expr": "cluster:svc:update_status:operator_restarts{namespace=\"$namespace\"}",
+ "legendFormat": "Operator restarts",
+ "refId": "A"
+ },
+ {
+ "expr": "cluster:svc:update_status:propeller_restarts{namespace=\"$namespace\"}",
+ "legendFormat": "Propeller restarts",
+ "refId": "B"
+ }
+ ],
+ "description": "DP-reported restart counts for operator and propeller pods. Set by DP on each UpdateStatus call."
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 22 + }, + "id": 604, + "title": "DB Errors by Type", + "type": "timeseries", + "targets": [ + { + "expr": "rate(cluster:database:postgres:errors:gorm_error{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "gorm_error", + "refId": "A" + }, + { + "expr": "rate(cluster:database:postgres:errors:postgres_error{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "postgres_error", + "refId": "B" + }, + { + "expr": "rate(cluster:database:postgres:errors:not_found{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "not_found", + "refId": "C" + } + ], + "description": "Cluster service Postgres error rates by type: gorm errors, native postgres errors, not-found." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "short", + "mappings": [ + { + "type": "value", + "options": { + "0": { + "text": "Healthy", + "color": "green" + }, + "1": { + "text": "Unhealthy", + "color": "red" + } + } + } + ] + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 22 + }, + "id": 605, + "title": "Cluster Health Status", + "type": "timeseries", + "targets": [ + { + "expr": "cluster:cluster_sync:health:unhealthy{namespace=\"$namespace\", subsystem=\"\"}", + "legendFormat": "{{ org }}/{{ cluster_name }}", + "refId": "A" + } + ], + "description": "Cluster health collector: 1=unhealthy, 0=healthy. Emitted per cluster on every Prometheus scrape." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 606, + "title": "Last Heartbeat Age (stale cluster detection)", + "type": "timeseries", + "targets": [ + { + "expr": "cluster:cluster_sync:health:last_update_age{namespace=\"$namespace\"}", + "legendFormat": "{{ org }}/{{ cluster_name }}", + "refId": "A" + } + ], + "description": "Seconds since each cluster last sent a heartbeat. High values = stale/disconnected cluster." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 607, + "title": "Managed Cluster Cache", + "type": "timeseries", + "targets": [ + { + "expr": "rate(cluster:managed_cluster_client_cache:get:hits{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Cache hits", + "refId": "A" + }, + { + "expr": "rate(cluster:managed_cluster_client_cache:get:miss{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Cache miss", + "refId": "B" + } + ], + "description": "LRU cache hit/miss rate for managed cluster lookups. High miss rate = excessive DB queries." 
+ } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 900, + "title": "CacheService (V1 + V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 15 + }, + "id": 901, + "title": "Cache Hit / Miss Rate", + "type": "timeseries", + "targets": [ + { + "expr": "rate(flyte:cacheservice:cache:cache_hit{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Hits", + "refId": "A" + }, + { + "expr": "rate(flyte:cacheservice:cache:not_found{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Misses", + "refId": "B" + }, + { + "expr": "rate(flyte:cacheservice:cache:get_failure{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Get failures", + "refId": "C" + } + ], + "description": "CacheService hit/miss rate. Hits = cached task output reused. Misses = task must execute. Get failures = storage errors. 
[Metrics pending: requires cloud service instrumentation to be deployed]" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 902, + "title": "Reservation Contention & Operations", + "type": "timeseries", + "targets": [ + { + "expr": "rate(flyte:cacheservice:cache:reservation_contention{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Contention", + "refId": "A" + }, + { + "expr": "rate(flyte:cacheservice:cache:get_reservation_success{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Reservation acquired", + "refId": "B" + }, + { + "expr": "rate(flyte:cacheservice:cache:release_reservation_success{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Reservation released", + "refId": "C" + } + ], + "description": "Cache reservation contention: how often workers are blocked waiting for another worker's cache computation. High contention = many workers computing the same task. 
[Metrics pending: requires cloud service instrumentation to be deployed]" + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 750, + "title": "Authorizer (V1 + V2)", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 15 + }, + "id": 751, + "title": "Allow / Deny Rate", + "type": "timeseries", + "targets": [ + { + "expr": "rate(authorizer:authz_allowed{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Allowed", + "refId": "A" + }, + { + "expr": "rate(authorizer:authz_denied{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Denied", + "refId": "B" + } + ], + "description": "Authorization decision rate. Allow/deny ratio indicates auth health. High deny rate may signal misconfigured policies. 
[Metrics pending: requires cloud service instrumentation to be deployed]" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ms" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 15 + }, + "id": 752, + "title": "Authorize Latency", + "type": "timeseries", + "targets": [ + { + "expr": "authorizer:authorize_duration{namespace=\"$namespace\", quantile=\"0.5\"}", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "authorizer:authorize_duration{namespace=\"$namespace\", quantile=\"0.9\"}", + "legendFormat": "p90", + "refId": "B" + }, + { + "expr": "authorizer:authorize_duration{namespace=\"$namespace\", quantile=\"0.99\"}", + "legendFormat": "p99", + "refId": "C" + } + ], + "description": "End-to-end Authorize() latency including identity resolution and backend authorization check. [Metrics pending: requires cloud service instrumentation to be deployed]" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "percentunit" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 15 + }, + "id": 753, + "title": "Deny Rate (%)", + "type": "timeseries", + "targets": [ + { + "expr": "rate(authorizer:authz_denied{namespace=\"$namespace\"}[$__rate_interval]) / (rate(authorizer:authz_allowed{namespace=\"$namespace\"}[$__rate_interval]) + rate(authorizer:authz_denied{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "Deny %", + "refId": "A" + } + ], + "description": "Percentage of authorization decisions that denied access. Spikes indicate policy changes or auth issues. 
[Metrics pending: requires cloud service instrumentation to be deployed]" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "thresholds": { + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "mappings": [ + { + "type": "value", + "options": { + "noop": { "text": "Noop", "index": 0 }, + "userclouds": { "text": "UserClouds", "index": 1 }, + "external": { "text": "External", "index": 2 }, + "authorizer": { "text": "Authorizer", "index": 3 } + } + } + ] + } + }, + "gridPos": { + "h": 8, + "w": 4, + "x": 0, + "y": 23 + }, + "id": 760, + "options": { + "colorMode": "background", + "graphMode": "none", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^type$/" + }, + "textMode": "value" + }, + "title": "Authorizer Mode", + "type": "stat", + "targets": [ + { + "expr": "authorizer:authorizer:cloudauthorizer:connect:authz_type_info{namespace=\"$namespace\"} == 1", + "legendFormat": "{{ type }}", + "refId": "A" + } + ], + "description": "Currently active authorizer backend type (Noop, UserClouds, External, Authorizer)." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ms" + } + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 4, + "y": 23 + }, + "id": 761, + "title": "External Backend Latency", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.50, sum by (le) (rate(authorizer:authorizer:cloudauthorizer:connect:external:authorize_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(authorizer:authorizer:cloudauthorizer:connect:external:authorize_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum by (le) (rate(authorizer:authorizer:cloudauthorizer:connect:external:authorize_duration_bucket{namespace=\"$namespace\"}[$__rate_interval])))", + "legendFormat": "p99", + "refId": "C" + } + ], + "description": "Latency of calls to the external authorization backend (p50/p95/p99)." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 23 + }, + "id": 762, + "title": "External Errors by gRPC Code", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (grpc_code) (rate(authorizer:authorizer:cloudauthorizer:connect:external:errors{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ grpc_code }}", + "refId": "A" + } + ], + "description": "Error rate from the external authorization backend, broken down by gRPC status code." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 23 + }, + "id": 763, + "title": "Fail-Open Activations", + "type": "timeseries", + "targets": [ + { + "expr": "rate(authorizer:authorizer:cloudauthorizer:connect:external:fail_open_activated{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Fail-Open", + "refId": "A" + } + ], + "description": "Rate of fail-open activations. Non-zero means the external backend is unreachable and requests are being allowed without authorization." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 764, + "title": "Decisions by Action", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (action) (rate(authorizer:authorizer:cloudauthorizer:connect:authz_allowed{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "allowed: {{ action }}", + "refId": "A" + }, + { + "expr": "sum by (action) (rate(authorizer:authorizer:cloudauthorizer:connect:authz_denied{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "denied: {{ action }}", + "refId": "B" + } + ], + "description": "Authorization decisions broken down by action (e.g. read, write, execute). Stacked to show total volume." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 765, + "title": "Error Attribution", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (error_source) (rate(authorizer:authorizer:cloudauthorizer:connect:authorize_errors_total{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ error_source }}", + "refId": "A" + } + ], + "description": "Authorization errors attributed by source (e.g. identity resolution, backend, policy evaluation)." + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 700, + "title": "Data Proxy", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 15 + }, + "id": 701, + "title": "Cache Hit/Miss Rates", + "type": "timeseries", + "targets": [ + { + "expr": "rate(dataproxy:domains:hits{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Domain hits", + "refId": "A" + }, + { + "expr": "rate(dataproxy:domains:miss{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Domain miss", + "refId": "B" + }, + { + "expr": "rate(dataproxy:clusterpoolcache:hits{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "ClusterPool hits", + "refId": "C" + }, + { + "expr": "rate(dataproxy:clusterpoolcache:miss{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "ClusterPool miss", + "refId": "D" + } + ], + 
"description": "DataProxy internal cache effectiveness for domain resolution, cluster pool routing, and namespace mapping." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "s" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 702, + "title": "Image Read Latency (p95)", + "type": "timeseries", + "targets": [ + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(dataproxy:images:read:success_ms_count{namespace=\"$namespace\"}[$__rate_interval]))) / 1000", + "legendFormat": "Success p95", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum by (le) (rate(dataproxy:images:read:failure_ms_count{namespace=\"$namespace\"}[$__rate_interval]))) / 1000", + "legendFormat": "Failure p95", + "refId": "B" + } + ], + "description": "Time to read image metadata from the dataplane, proxied through DataProxy." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 703, + "title": "Secret Proxy Errors by Cluster", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (cluster, operation) (rate(dataproxy:secrets_service:cluster_errors{namespace=\"$namespace\"}[$__rate_interval]))", + "legendFormat": "{{ cluster }} {{ operation }}", + "refId": "A" + } + ], + "description": "Per-cluster secret proxy errors during fan-out operations. Identifies which dataplane cluster is causing failures. 
[Metrics pending: requires cloud service instrumentation to be deployed]" + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 800, + "title": "Usage Service", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 801, + "title": "Billable Usage Reports", + "type": "timeseries", + "targets": [ + { + "expr": "rate(usage:svc:report_billable_usage{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Reports/s", + "refId": "A" + } + ], + "description": "Rate of ReportBillableUsage calls from DP clusters. Each call reports resource consumption for billing." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 802, + "title": "Message Pipeline", + "type": "timeseries", + "targets": [ + { + "expr": "rate(usage:messages:messages_received{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Received", + "refId": "A" + }, + { + "expr": "rate(usage:messages:messages_sent{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Sent", + "refId": "B" + }, + { + "expr": "rate(usage:messages:messages_dropped{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Dropped", + "refId": "C" + }, + { + "expr": "rate(usage:messages:messages_failed{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "Failed", + "refId": "D" + } + ], + 
"description": "SQS/queue message processing: received, sent (success), failed, dropped (max retries exceeded)." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "ops" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 803, + "title": "Messages by Type (success)", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (message_type) (rate(usage:messages:messages_processed{namespace=\"$namespace\", outcome=\"success\"}[$__rate_interval]))", + "legendFormat": "{{ message_type }}", + "refId": "A" + } + ], + "description": "Successful message processing rate by type: node_execution, workflow_execution, billable_usage, serverless_billable_usage. [Metrics pending: requires cloud service instrumentation to be deployed]" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "ms" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 804, + "title": "Message Processing Latency", + "type": "timeseries", + "targets": [ + { + "expr": "usage:messages:processing_time{namespace=\"$namespace\", quantile=\"0.5\"}", + "legendFormat": "p50", + "refId": "A" + }, + { + "expr": "usage:messages:processing_time{namespace=\"$namespace\", quantile=\"0.9\"}", + "legendFormat": "p90", + "refId": "B" + }, + { + "expr": "usage:messages:processing_time{namespace=\"$namespace\", quantile=\"0.99\"}", + "legendFormat": "p99", + "refId": "C" + } + ], + "description": "Time to process individual queue messages. 
Slow processing may indicate backend API issues (Metronome, timestream). [Metrics pending: requires cloud service instrumentation to be deployed]" + } + ] + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 38 + }, + "id": 1100, + "title": "Infrastructure", + "type": "row", + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 1101, + "title": "CPU Usage by Service", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", container!=\"\", container!=\"POD\"}[$__rate_interval]))", + "legendFormat": "{{ container }}", + "refId": "A" + } + ], + "description": "CPU usage in cores per container, stacked. Identifies resource-heavy services." + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "showPoints": "never", + "stacking": { + "mode": "normal" + } + }, + "unit": "bytes" + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 1102, + "title": "Memory Usage by Service", + "type": "timeseries", + "targets": [ + { + "expr": "sum by (container) (container_memory_working_set_bytes{namespace=\"$namespace\", container!=\"\", container!=\"POD\"})", + "legendFormat": "{{ container }}", + "refId": "A" + } + ], + "description": "Working set memory per container, stacked. Watch for approaching limits." 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "drawStyle": "bars", + "fillOpacity": 80, + "lineWidth": 1, + "showPoints": "never" + }, + "unit": "short" + } + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 1103, + "title": "Pod Restart Count by Container", + "type": "timeseries", + "targets": [ + { + "expr": "increase(kube_pod_container_status_restarts_total{namespace=\"$namespace\"}[$__rate_interval])", + "legendFormat": "{{ pod }}/{{ container }}", + "refId": "A" + } + ], + "description": "Per-container restart events. Spikes indicate crashes or OOM kills." + } + ] + } + ], + "schemaVersion": 39, + "tags": [ + "union", + "controlplane" + ], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Data Source", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "type": "datasource" + }, + { + "current": { + "selected": true, + "text": "union", + "value": "union" + }, + "hide": 2, + "label": "Namespace", + "name": "namespace", + "options": [ + { + "selected": true, + "text": "union", + "value": "union" + } + ], + "query": "union", + "type": "constant" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Union Controlplane Overview", + "uid": "union-cp-overview", + "version": 2 + } +--- +# Source: controlplane/templates/scylla/storageclass.yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: scylladb +provisioner: ebs.csi.eks.amazonaws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + fsType: ext4 + type: gp2 +reclaimPolicy: Delete +allowVolumeExpansion: true +--- +# Source: controlplane/charts/flyte/templates/admin/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: union-flyteadmin + 
labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + - flyte.lyft.com + - rbac.authorization.k8s.io + resources: + - configmaps + - flyteworkflows + - namespaces + - pods + - resourcequotas + - roles + - rolebindings + - secrets + - services + - serviceaccounts + - spark-role + - limitranges + verbs: + - '*' +--- +# Source: controlplane/charts/scylla-operator/templates/edit_clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scyllacluster-edit + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: + - scylla.scylladb.com + resources: + - scyllaclusters + - scylladbmonitorings + - scylladbdatacenters + - scylladbclusters + - scylladbmanagerclusterregistrations + - scylladbmanagertasks + verbs: + - create + - patch + - update + - delete + - deletecollection +--- +# Source: controlplane/charts/scylla-operator/templates/operator.clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:controller:operator +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.operator.scylladb.com/aggregate-to-scylla-operator: "true" +--- +# Source: controlplane/charts/scylla-operator/templates/operator.clusterrole_def.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:controller:aggregate-to-operator + labels: + rbac.operator.scylladb.com/aggregate-to-scylla-operator: "true" +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update + - watch + - patch +- apiGroups: + - "" + 
resources: + - persistentvolumes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - namespaces + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + - daemonsets + - deployments + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - apps + resources: + - statefulsets/scale + verbs: + - update +- apiGroups: + - scylla.scylladb.com + resources: + - scyllaclusters + - scylladbmonitorings + - scylladbdatacenters + - remotekubernetesclusters + - scylladbclusters + - scylladbmanagerclusterregistrations + - scylladbmanagertasks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - scylla.scylladb.com + resources: + - scyllaclusters/status + - scylladbmonitorings/status + - scylladbdatacenters/status + - remotekubernetesclusters/status + - scylladbclusters/status + - scylladbmanagerclusterregistrations/status + - scylladbmanagertasks/status + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - scylla.scylladb.com + resources: + - nodeconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - scylla.scylladb.com + 
resources: + - nodeconfigs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - scylla.scylladb.com + resources: + - scyllaoperatorconfigs + - scyllaoperatorconfigs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheuses + - prometheusrules + - servicemonitors + verbs: + - get + - list + - watch + - create + - patch + - update + - delete +- apiGroups: + - "" + resources: + - configmaps/finalizers + - secrets/finalizers + - pods/finalizers + verbs: + - update +- apiGroups: + - apps + resources: + - daemonsets/finalizers + verbs: + - update +- apiGroups: + - scylla.scylladb.com + resources: + - scyllaclusters/finalizers + - scylladbdatacenters/finalizers + - scylladbmonitorings/finalizers + - scylladbmanagerclusterregistrations/finalizers + - scylladbmanagertasks/finalizers + verbs: + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets/finalizers + verbs: + - update +- apiGroups: + - scylla.scylladb.com + resources: + - nodeconfigs/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - configmaps/finalizers + - secrets/finalizers + - pods/finalizers + verbs: + - update +- apiGroups: + - apps + resources: + - daemonsets/finalizers + verbs: + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets/finalizers + verbs: + - update +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: controlplane/charts/scylla-operator/templates/operator.clusterrole_def_openshift.yaml +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:controller:aggregate-to-operator-openshift + labels: + rbac.operator.scylladb.com/aggregate-to-scylla-operator: "true" +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +# Source: controlplane/charts/scylla-operator/templates/operator_remote.clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:controller:operator-remote +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.operator.scylladb.com/aggregate-to-scylla-operator-remote: "true" +--- +# Source: controlplane/charts/scylla-operator/templates/operator_remote.clusterrole_def.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:controller:aggregate-to-operator-remote + labels: + rbac.operator.scylladb.com/aggregate-to-scylla-operator-remote: "true" +rules: +- apiGroups: + - scylla.scylladb.com + resources: + - scylladbdatacenters + - remoteowners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - scylla.scylladb.com + resources: + - scylladbdatacenters/status + - remoteowners/status + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + - namespaces + - services + - secrets + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +--- +# Source: controlplane/charts/scylla-operator/templates/scyllacluster_member_clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scyllacluster-member +aggregationRule: + clusterRoleSelectors: + - matchLabels: 
+ rbac.operator.scylladb.com/aggregate-to-scylla-member: "true" +--- +# Source: controlplane/charts/scylla-operator/templates/scyllacluster_member_clusterrole_def.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:aggregate-to-scyllacluster-member + labels: + rbac.operator.scylladb.com/aggregate-to-scylla-member: "true" +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "apps" + resources: + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - configmaps/finalizers + - secrets/finalizers + verbs: + - update +--- +# Source: controlplane/charts/scylla-operator/templates/scyllacluster_member_clusterrole_def_openshift.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:aggregate-to-scyllacluster-member-openshift + labels: + rbac.operator.scylladb.com/aggregate-to-scylla-member: "true" +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +# Source: controlplane/charts/scylla-operator/templates/scylladbmonitoring_grafana_clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:monitoring:grafana +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.operator.scylladb.com/aggregate-to-scylladb-monitoring-grafana: "true" +--- +# Source: controlplane/charts/scylla-operator/templates/scylladbmonitoring_grafana_clusterrole_def_openshift.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:aggregate-to-scylladb-monitoring-grafana-openshift 
+ labels: + rbac.operator.scylladb.com/aggregate-to-scylladb-monitoring-grafana: "true" +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +# Source: controlplane/charts/scylla-operator/templates/scylladbmonitoring_prometheus_clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:monitoring:prometheus +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.operator.scylladb.com/aggregate-to-scylladb-monitoring-prometheus: "true" +--- +# Source: controlplane/charts/scylla-operator/templates/scylladbmonitoring_prometheus_clusterrole_def.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:aggregate-to-scylladb-monitoring-prometheus + labels: + rbac.operator.scylladb.com/aggregate-to-scylladb-monitoring-prometheus: "true" +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +--- +# Source: controlplane/charts/scylla-operator/templates/scylladbmonitoring_prometheus_clusterrole_def_openshift.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: scylladb:aggregate-to-scylladb-monitoring-prometheus-openshift + labels: + rbac.operator.scylladb.com/aggregate-to-scylladb-monitoring-prometheus: "true" +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +# Source: controlplane/charts/scylla-operator/templates/view_clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: scyllacluster-view + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: +- apiGroups: + - 
scylla.scylladb.com + resources: + - scyllaclusters + - scylladbmonitorings + - scylladbdatacenters + - scylladbclusters + - scylladbmanagerclusterregistrations + - scylladbmanagertasks + verbs: + - get + - list + - watch +--- +# Source: controlplane/charts/flyte/templates/admin/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: union-flyteadmin-binding + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: union-flyteadmin +subjects: +- kind: ServiceAccount + name: flyteadmin + namespace: union +--- +# Source: controlplane/charts/scylla-operator/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: scylladb:controller:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: scylladb:controller:operator +subjects: +- kind: ServiceAccount + name: scylla-operator + namespace: scylla-operator +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: flyteadmin + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + #app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + - flyte.lyft.com + - rbac.authorization.k8s.io + resources: + - configmaps + - flyteworkflows + - namespaces + - pods + - resourcequotas + - roles + - rolebindings + - secrets + - services + - serviceaccounts + - spark-role + verbs: + - '*' +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: flyteadmin-binding + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + 
helm.sh/chart: controlplane-2026.3.12 + #app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: flyteadmin +subjects: + - kind: ServiceAccount + name: flyteadmin + namespace: union +--- +# Source: controlplane/charts/flyte/templates/admin/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: flyteadmin + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8088 + - name: grpc + port: 81 + protocol: TCP + # intentionally set to TCP instead of grpc + targetPort: 8089 + - name: redoc + protocol: TCP + port: 87 + targetPort: 8087 + - name: http-metrics + protocol: TCP + port: 10254 + selector: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/charts/flyte/templates/console/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: flyteconsole + namespace: union + labels: + app.kubernetes.io/name: flyteconsole + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + app.kubernetes.io/managed-by: Helm + annotations: + external-dns.alpha.kubernetes.io/hostname: flyte.example.com + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600" +spec: + type: ClusterIP + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: flyteconsole + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/charts/scylla-operator/templates/webhookserver.service.yaml +apiVersion: v1 +kind: Service +metadata: + namespace: scylla-operator + name: scylla-operator-webhook + labels: + app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server +spec: + ports: + - port: 443 + targetPort: 5000 + name: webhook + selector: + app.kubernetes.io/name: 
webhook-server + app.kubernetes.io/instance: webhook-server +--- +# Source: controlplane/templates/cacheservice/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cacheservice + namespace: union + labels: + platform.union.ai/prometheus-group: "union-services" + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 88 + protocol: TCP + targetPort: http + - name: grpc + port: 89 + protocol: TCP + targetPort: grpc + - name: http-metrics + protocol: TCP + port: 10254 + selector: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/console/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: unionconsole + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + - port: 8081 + targetPort: http-metrics + protocol: TCP + name: http-metrics + selector: + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +--- +apiVersion: v1 +kind: Service +metadata: + name: authorizer + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: connect + - name: grpc-native + port: 8080 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: 
connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: connect + - name: grpc-native + port: 8080 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: dataproxy + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: executions + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: 
controlplane-2026.3.12 + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: queue + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: run-scheduler + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + 
app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: usage + labels: + platform.union.ai/prometheus-group: "union-services" + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - name: grpc + port: 80 + protocol: TCP + targetPort: connect + - name: grpc-native + port: 8080 + protocol: TCP + targetPort: grpc + - name: connect + port: 83 + protocol: TCP + targetPort: connect + - name: http + port: 81 + protocol: TCP + targetPort: http + - name: debug + port: 82 + protocol: TCP + targetPort: debug + selector: + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name +--- +# Source: controlplane/charts/flyte/templates/admin/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: flyteadmin + namespace: union + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + configChecksum: "b273dd4479bec174955aede3298a293ecb7352770754604ae4a759454e65f7d" + kubectl.kubernetes.io/default-container: flyteadmin + labels: + app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm + spec: + securityContext: + fsGroup: 65534 + fsGroupChangePolicy: Always + runAsNonRoot: true + runAsUser: 1001 + seLinuxOptions: + type: spc_t + initContainers: + - command: + - flyteadmin + - --config + - /etc/flyte/config/*.yaml + - migrate + - 
run + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + name: run-migrations + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + volumeMounts: + - mountPath: /etc/db + name: union-controlplane-secrets + - mountPath: /etc/flyte/config + name: base-config-volume + - command: + - flyteadmin + - --config + - /etc/flyte/config/*.yaml + - migrate + - seed-projects + - union-health-monitoring + - flytesnacks + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + name: seed-projects + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + volumeMounts: + - mountPath: /etc/db + name: union-controlplane-secrets + - mountPath: /etc/flyte/config + name: base-config-volume + - name: generate-secrets + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + command: ["/bin/sh", "-c"] + args: + [ + "flyteadmin --config=/etc/flyte/config/*.yaml secrets init --localPath /etc/scratch/secrets && flyteadmin --config=/etc/flyte/config/*.yaml secrets create --name flyte-admin-secrets --fromPath /etc/scratch/secrets", + ] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + volumeMounts: + - mountPath: /etc/flyte/config + name: base-config-volume + - mountPath: /etc/scratch + name: scratch + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + containers: + - command: + - flyteadmin + - --config + - /etc/flyte/config/*.yaml + - serve + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + name: flyteadmin + ports: + - containerPort: 8088 + - containerPort: 8089 + - containerPort: 10254 + readinessProbe: + httpGet: + path: /healthcheck + port: 8088 + initialDelaySeconds: 15 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + 
failureThreshold: 3 + livenessProbe: + httpGet: + path: /healthcheck + port: 8088 + initialDelaySeconds: 20 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 2 + ephemeral-storage: 500Mi + memory: 3Gi + requests: + cpu: 50m + ephemeral-storage: 200Mi + memory: 500Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + volumeMounts: + - mountPath: /etc/db + name: union-controlplane-secrets + - mountPath: /srv/flyte + name: shared-data + - mountPath: /etc/flyte/config + name: clusters-config-volume + - mountPath: /etc/secrets/ + name: admin-secrets + - mountPath: /etc/secrets/union + name: union-secrets + readOnly: true + serviceAccountName: flyteadmin + volumes: + - name: union-controlplane-secrets + secret: + secretName: union-controlplane-secrets + - emptyDir: {} + name: shared-data + - emptyDir: {} + name: scratch + - projected: + sources: + - configMap: + name: flyte-admin-base-config + name: base-config-volume + - projected: + sources: + - configMap: + name: flyte-admin-base-config + - configMap: + name: flyte-admin-clusters-config + name: clusters-config-volume + - name: admin-secrets + secret: + secretName: flyte-admin-secrets + - name: union-secrets + secret: + secretName: '' +--- +# Source: controlplane/charts/flyte/templates/console/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: flyteconsole + namespace: union + labels: + app.kubernetes.io/name: flyteconsole + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: flyteconsole + app.kubernetes.io/instance: release-name + template: + metadata: + annotations: + configChecksum: "2620ed20cf30d64460b231bbcf13fc096a23b6d373b46e69ab5f2e051f3d3d1" + linkerd.io/inject: disabled + prometheus.io/scrape: "false" + labels: + app.kubernetes.io/name: flyteconsole + 
app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + app.kubernetes.io/managed-by: Helm + spec: + securityContext: + fsGroupChangePolicy: OnRootMismatch + runAsNonRoot: true + runAsUser: 1000 + seLinuxOptions: + type: spc_t + containers: + - image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/flyteconsole:" + imagePullPolicy: "IfNotPresent" + name: flyteconsole + envFrom: + - configMapRef: + name: flyte-console-config + ports: + - containerPort: 8080 + env: + - name: ENABLE_GA + value: "true" + - name: GA_TRACKING_ID + value: "" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + resources: + limits: + cpu: 250m + ephemeral-storage: 200Mi + memory: 250Mi + requests: + cpu: 10m + ephemeral-storage: 20Mi + memory: 50Mi + volumeMounts: + - mountPath: /srv/flyte + name: shared-data + volumes: + - emptyDir: {} + name: shared-data +--- +# Source: controlplane/charts/scylla-operator/templates/operator.deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: scylla-operator + namespace: scylla-operator + labels: + app.kubernetes.io/name: scylla-operator + app.kubernetes.io/instance: scylla-operator +spec: + replicas: 2 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: scylla-operator + app.kubernetes.io/instance: scylla-operator + template: + metadata: + labels: + app.kubernetes.io/name: scylla-operator + app.kubernetes.io/instance: scylla-operator + spec: + serviceAccountName: scylla-operator + containers: + - name: scylla-operator + image: scylladb/scylla-operator:1.18.1 + imagePullPolicy: IfNotPresent + env: + - name: SCYLLA_OPERATOR_IMAGE + value: scylladb/scylla-operator:1.18.1 + args: + - operator + - --loglevel=2 + resources: + requests: + cpu: 100m + memory: 20Mi + terminationGracePeriodSeconds: 10 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + 
app.kubernetes.io/instance: scylla-operator + app.kubernetes.io/name: scylla-operator + topologyKey: kubernetes.io/hostname + weight: 1 +--- +# Source: controlplane/charts/scylla-operator/templates/webhookserver.deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: scylla-operator + name: webhook-server + labels: + app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server +spec: + replicas: 2 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server + template: + metadata: + labels: + app.kubernetes.io/name: webhook-server + app.kubernetes.io/instance: webhook-server + spec: + serviceAccountName: "webhook-server" + containers: + - name: webhook-server + image: scylladb/scylla-operator:1.18.1 + imagePullPolicy: IfNotPresent + args: + - run-webhook-server + - --loglevel=2 + - --tls-cert-file=/tmp/serving-certs/tls.crt + - --tls-private-key-file=/tmp/serving-certs/tls.key + livenessProbe: + httpGet: + path: /readyz + port: 5000 + scheme: HTTPS + readinessProbe: + httpGet: + path: /readyz + port: 5000 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 10 + lifecycle: + preStop: + exec: + command: + - /usr/bin/sleep + - 15s + ports: + - containerPort: 5000 + name: webhook-server + protocol: TCP + resources: + requests: + cpu: 10m + memory: 20Mi + volumeMounts: + - mountPath: /tmp/serving-certs + name: cert + readOnly: true + terminationGracePeriodSeconds: 75 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: scylla-operator-serving-cert + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: webhook-server + app.kubernetes.io/name: webhook-server + topologyKey: kubernetes.io/hostname + weight: 1 +--- +# Source: controlplane/templates/cacheservice/deployment.yaml +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: cacheservice + namespace: union + labels: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + template: + metadata: + annotations: + configChecksum: "7eede7c92a6d230760dd9b2dd3404831ea5bc338b34159ebc5290cdbf18b560" + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: cacheservice + app.kubernetes.io/instance: release-name + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/managed-by: Helm + spec: + securityContext: + fsGroup: 1001 + fsGroupChangePolicy: OnRootMismatch + runAsNonRoot: true + runAsUser: 1001 + seLinuxOptions: + type: spc_t + initContainers: + - command: + - cacheservice + - --config + - /etc/cacheservice/config/*.yaml + - migrate + - run + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + name: run-migrations + volumeMounts: + - mountPath: /etc/db + name: union-controlplane-secrets + - mountPath: /etc/cacheservice/config + name: config-volume + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + containers: + - command: + - cacheservice + - --config + - /etc/cacheservice/config/*.yaml + - serve + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:" + imagePullPolicy: "IfNotPresent" + name: cacheservice + ports: + - name: http + containerPort: 8088 + - name: grpc + containerPort: 8089 + - name: http-metrics + containerPort: 10254 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + resources: + limits: + cpu: 1 + ephemeral-storage: 200Mi + requests: + cpu: 500m + ephemeral-storage: 200Mi + memory: 200Mi + volumeMounts: + - mountPath: /etc/db + name: 
union-controlplane-secrets + - mountPath: /etc/cacheservice/config + name: config-volume + - mountPath: /etc/secrets/union + name: union-secrets + readOnly: true + serviceAccountName: cacheservice + volumes: + - name: union-controlplane-secrets + secret: + secretName: union-controlplane-secrets + - emptyDir: {} + name: shared-data + - configMap: + name: cacheservice-config + name: config-volume + - name: union-secrets + secret: + secretName: '' + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: cacheservice + topologyKey: kubernetes.io/hostname +--- +# Source: controlplane/templates/console/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: unionconsole + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: unionconsole + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: unionconsole + securityContext: + fsGroupChangePolicy: OnRootMismatch + runAsNonRoot: true + runAsUser: 1000 + seLinuxOptions: + type: spc_t + containers: + - name: unionconsole + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.3.9" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: http-metrics + containerPort: 8081 + protocol: TCP + 
resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi +--- +# Source: controlplane/templates/deployment.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: authorizer + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: authorizer + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: authorizer + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: authorizer + containers: + - name: authorizer + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - authorizer + - serve + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + - name: connect + containerPort: 8081 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: 
/healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: authorizer + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: cluster + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: cluster + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: cluster + initContainers: + - name: cluster-migrate + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudcluster + - migrate + - --config + - /etc/config/*.yaml + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + containers: + - name: cluster + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudcluster + - serve + - --config + - 
/etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + - name: connect + containerPort: 8081 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: cluster + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dataproxy + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: dataproxy + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: dataproxy + 
volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: dataproxy + containers: + - name: dataproxy + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - dataproxy + - serve + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: dataproxy + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: executions + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + 
annotations: + kubectl.kubernetes.io/default-container: executions + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: executions + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: executions + initContainers: + - name: executions-migrate + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudpropeller + - migrate + - --config + - /etc/config/*.yaml + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + containers: + - name: executions + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudpropeller + - serve + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + 
matchLabels: + app.kubernetes.io/name: executions + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: queue + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: queue + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: queue + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: queue + initContainers: + - name: queue-migrate + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - queue + - migrate + - --config + - /etc/config/*.yaml + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + containers: + - name: queue + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - queue + - serve + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + 
env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: queue + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: run-scheduler + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: run-scheduler + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: run-scheduler + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: run-scheduler + initContainers: + - name: run-scheduler-migrate + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudpropeller + - migrate + - --config + - 
/etc/config/*.yaml + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + containers: + - name: run-scheduler + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - cloudpropeller + - scheduler + - start + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: run-scheduler + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: usage + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: 
RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: usage + linkerd.io/inject: disabled + prometheus.io/path: /metrics + prometheus.io/port: "10254" + labels: + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: usage + volumes: + - name: secrets + secret: + secretName: + - name: db-pass + secret: + secretName: + - name: config + configMap: + name: usage + containers: + - name: usage + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + imagePullPolicy: IfNotPresent + args: + - usage + - serve + - --config + - /etc/config/*.yaml + ports: + - name: grpc + containerPort: 8080 + protocol: TCP + - name: http + containerPort: 8089 + protocol: TCP + - name: debug + containerPort: 10254 + protocol: TCP + - name: connect + containerPort: 8081 + protocol: TCP + volumeMounts: + - name: db-pass + mountPath: /etc/db + - name: secrets + mountPath: /etc/secrets/union + - name: config + mountPath: /etc/config/ + env: + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.cpu + resources: + limits: + cpu: 3 + memory: 512Mi + requests: + cpu: 500m + memory: 250Mi + livenessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthcheck + port: debug + initialDelaySeconds: 3 + periodSeconds: 3 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: usage + app.kubernetes.io/instance: release-name + topologyKey: "kubernetes.io/hostname" +--- +# Source: controlplane/charts/flyte/templates/admin/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: flyteadmin + namespace: union + labels: + 
app.kubernetes.io/name: flyteadmin + app.kubernetes.io/instance: release-name + helm.sh/chart: flyte-v1.16.1 + #app.kubernetes.io/managed-by: Helm +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: flyteadmin + minReplicas: 1 + maxReplicas: 10 + metrics: + + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + - resource: + name: memory + target: + averageUtilization: 80 + type: Utilization + type: Resource +--- +# Source: controlplane/templates/console/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: unionconsole + labels: + helm.sh/chart: controlplane-2026.3.12 + app.kubernetes.io/name: unionconsole + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/managed-by: Helm +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: unionconsole + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: authorizer +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: authorizer + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: cluster +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: cluster + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - 
type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: dataproxy +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: dataproxy + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: executions +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: executions + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: run-scheduler +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: run-scheduler + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: controlplane/templates/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: usage +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: usage + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +--- +# Source: 
controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-dataproxy + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). + # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. 
+ client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/auth-cache-key: $http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?redirect_url=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-url: https://$host/me + nginx.org/websocket-services: dataproxy-service +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /data/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /data + pathType: Prefix + backend: + service: + name: dataproxy + port: + name: grpc +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-usage-grpc + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). 
+ # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/backend-protocol: GRPC + nginx.ingress.kubernetes.io/auth-cache-key: $http_authorization$http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-url: http://flyteadmin.union.svc.cluster.local/me + nginx.ingress.kubernetes.io/use-regex: "true" +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /cloudidl.usage.UsageService(/(?!GetCustomMeasuresNames|GetMeasureGroup|GetMeasureGroups|GetBillableMeasures|GetBillingInfo|ReportBillableUsage|ReportServerlessBillableUsage|CreateCustomer|AttachBillingPlanToCustomer|GetCustomerCredits|EnqueueMetronomeRequest|EnqueueStripeRequest|GetOrgCheckoutSession).*|$) + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: connect +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: 
controlplane-usage + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). + # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. 
+ client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/auth-cache-key: $http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?redirect_url=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-url: https://$host/me + nginx.org/websocket-services: dataproxy-service + nginx.ingress.kubernetes.io/use-regex: "true" +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /usage/api/v1(/(?!custom_measures_names|measure_group|measure_groups|billable_measures|billing_info|report_billable_usage|customer_credits|checkout_session).*|$) + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-protected + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. 
These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). + # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/auth-cache-key: $http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?redirect_url=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-url: https://$host/me + nginx.org/websocket-services: dataproxy-service +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /api + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /api/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /v1/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /cloudadmin + pathType: ImplementationSpecific + backend: + service: + name: 
flyteadmin + port: + name: http + - path: /cloudadmin/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /actor + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /actor/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /agent + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /agent/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /dataplane + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /dataplane/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /spark-history-server + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /spark-history-server/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /api/v1/dataproxy + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /api/v1/dataproxy/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: http + - path: /app + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: http + - path: /app/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: http + - path: /apps + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: http + - path: /apps/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: http + - path: /cluster + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /cluster/* + pathType: ImplementationSpecific + backend: + service: + 
name: cluster + port: + name: http + - path: /clusterpool + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /clusterpool/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /clusterconfig + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /clusterconfig/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /org + pathType: ImplementationSpecific + backend: + service: + name: organizations + port: + name: http + - path: /org/* + pathType: ImplementationSpecific + backend: + service: + name: organizations + port: + name: http + - path: /managed_cluster + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /managed_cluster/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: http + - path: /authorizer + pathType: ImplementationSpecific + backend: + service: + name: authorizer + port: + name: http + - path: /authorizer/* + pathType: ImplementationSpecific + backend: + service: + name: authorizer + port: + name: http + - path: /oauth_app + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /oauth_app/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /users + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /users/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /members + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /members/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /roles + pathType: ImplementationSpecific + backend: + service: + name: 
identity + port: + name: http + - path: /roles/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /policies + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /policies/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /identities + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /identities/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: http + - path: /echo + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /echo/* + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /execution + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /execution/* + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /workspace_registry + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /workspace_registry/* + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /workspace_instance + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /workspace_instance/* + pathType: ImplementationSpecific + backend: + service: + name: execution + port: + name: http + - path: /usage + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http + - path: /usage/* + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-protected-grpc + namespace: union + annotations: + 
nginx.ingress.kubernetes.io/app-root: /v2
+    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+    nginx.ingress.kubernetes.io/limit-rps: "100"
+    nginx.ingress.kubernetes.io/proxy-body-size: 6m
+    nginx.ingress.kubernetes.io/proxy-buffer-size: 32k
+    nginx.ingress.kubernetes.io/proxy-buffers: 4 32k
+    nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host
+    nginx.ingress.kubernetes.io/server-snippet: |
+      client_header_timeout 604800;
+      client_body_timeout 604800;
+      # Increasing the default configuration from
+      # client_header_buffer_size 1k;
+      # large_client_header_buffers 4 8k;
+      # to a default of 16k and 32k for large buffer sizes. These sizes are chosen as a short-term mitigation until we can collect data to reason
+      # about expected header sizes (PE-1101).
+      # Historically, we have seen that with the previous 8k max buffer size, the auth endpoint of /me would throw 400 Bad Request, and because of this the ingress controller
+      # threw a 500, since it doesn't expect this status code on an auth request (expected range: 200 <= authcall.status (i.e. status of /me call) <= 300).
+      # Code link for ref: https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179
+      # Now, the main reason we have seen 400 Bad Request is the large size of the cookies, which contributes to the header size.
+      # We should keep reducing the size of the headers being sent; meanwhile we increase this buffer size to mitigate the long-header issue.
+ client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/backend-protocol: GRPC + nginx.ingress.kubernetes.io/auth-cache-key: $http_authorization$http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-url: http://flyteadmin.union.svc.cluster.local/me +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /cloudidl.execution.ExecutionService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.execution.ExecutionService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.cluster.ClusterService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.cluster.ClusterService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.cluster.ClusterNodepoolService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.cluster.ClusterNodepoolService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.apikey.APIKeyService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.apikey.APIKeyService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.AppsService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: 
/cloudidl.identity.AppsService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.org.OrgService/* + pathType: ImplementationSpecific + backend: + service: + name: organizations + port: + name: grpc + - path: /cloudidl.org.OrgService + pathType: ImplementationSpecific + backend: + service: + name: organizations + port: + name: grpc + - path: /cloudidl.cloudaccounts.CloudAccountsService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: grpc + - path: /cloudidl.cloudaccounts.CloudAccountsService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: grpc + - path: /cloudidl.cluster.ManagedClusterService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.cluster.ManagedClusterService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.identity.UserService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.UserService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.MemberService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.MemberService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.RoleService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.RoleService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.PolicyService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: 
/cloudidl.identity.PolicyService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: connect + - path: /cloudidl.identity.SelfServe/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: grpc + - path: /cloudidl.identity.SelfServe + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: grpc + - path: /cloudidl.identity.IdentityService/* + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: grpc + - path: /cloudidl.identity.IdentityService + pathType: ImplementationSpecific + backend: + service: + name: identity + port: + name: grpc + - path: /cloudidl.clusterpool.ClusterPoolService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.clusterpool.ClusterPoolService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.clusterconfig.ClusterConfigService/* + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.clusterconfig.ClusterConfigService + pathType: ImplementationSpecific + backend: + service: + name: cluster + port: + name: connect + - path: /cloudidl.authorizer.AuthorizerService/* + pathType: ImplementationSpecific + backend: + service: + name: authorizer + port: + name: connect + - path: /cloudidl.authorizer.AuthorizerService + pathType: ImplementationSpecific + backend: + service: + name: authorizer + port: + name: connect + - path: /cloudidl.usage.UsageService/* + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: connect + - path: /cloudidl.usage.UsageService + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: connect + - path: /datacatalog.DataCatalog/* + pathType: ImplementationSpecific + backend: + service: + name: datacatalog + port: + name: grpc + - 
path: /datacatalog.DataCatalog + pathType: ImplementationSpecific + backend: + service: + name: datacatalog + port: + name: grpc + - path: /flyteidl.cacheservice.CacheService/* + pathType: ImplementationSpecific + backend: + service: + name: cacheservice + port: + name: grpc + - path: /flyteidl.cacheservice.CacheService + pathType: ImplementationSpecific + backend: + service: + name: cacheservice + port: + name: grpc + - path: /flyteidl.cacheservice.v2.CacheService/* + pathType: ImplementationSpecific + backend: + service: + name: cacheservice + port: + name: grpc + - path: /flyteidl.cacheservice.v2.CacheService + pathType: ImplementationSpecific + backend: + service: + name: cacheservice + port: + name: grpc + - path: /cloudidl.actor.ActorEnvironmentService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.actor.ActorEnvironmentService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.agent.AgentService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.agent.AgentService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.secret.SecretService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.secret.SecretService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.secret.SecretService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.secret.SecretService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.support.SupportService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: 
/cloudidl.clouddataproxy.CloudDataProxyService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.clouddataproxy.CloudDataProxyService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl.service.DataProxyService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl.service.DataProxyService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.dataproxy.DataProxyService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.dataproxy.DataProxyService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.logs.LogsService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.logs.LogsService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.workspace.WorkspaceRegistryService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workspace.WorkspaceRegistryService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workspace.WorkspaceInstanceService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workspace.WorkspaceInstanceService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.RunService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.RunService/* + pathType: ImplementationSpecific + backend: + service: + name: 
executions + port: + name: grpc + - path: /cloudidl.workflow.InternalRunService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.InternalRunService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TranslatorService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TranslatorService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TaskService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TaskService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TriggerService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TriggerService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.QueueService + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.QueueService/* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.StateService + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.StateService/* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + + - path: /flyteidl2.workflow.RunService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.RunService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - 
path: /flyteidl2.workflow.TranslatorService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.TranslatorService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.task.TaskService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.task.TaskService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.QueueService + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /flyteidl2.workflow.QueueService/* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /flyteidl2.trigger.TriggerService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.trigger.TriggerService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.StateService + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /flyteidl2.workflow.StateService/* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.imagebuilder.ImageService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.imagebuilder.ImageService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.imagebuilder.ImageService + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.imagebuilder.ImageService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.app.AppService/* 
+ pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.app.AppLogsService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.app.ReplicaService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.app.AppService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.app.AppLogsService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.app.ReplicaService/* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-protected-grpc-streaming + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). 
+ # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/backend-protocol: GRPC + nginx.ingress.kubernetes.io/auth-cache-key: $http_authorization$http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-url: http://flyteadmin.union.svc.cluster.local/me +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /flyteidl2.auth.IdentityService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl2.auth.IdentityService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl2.project.ProjectService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl2.project.ProjectService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: 
/flyteidl.service.AdminService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.AdminService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + + - path: /flyteidl.service.WatchService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + + - path: /flyteidl.service.WatchService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /cloudidl.cloudadmin.CloudAdminService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /cloudidl.cloudadmin.CloudAdminService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.IdentityService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.IdentityService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /cloudidl.echo.EchoService/* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.echo.EchoService + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl.service.SignalService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.SignalService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /cloudidl.actor.ActorEnvironmentService/Stream* + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.execution.ExecutionService/GetExecutionOperation + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + 
- path: /cloudidl.workflow.RunLogsService/TailLogs + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.RunService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.InternalRunService/Record* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.InternalRunService/Update* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.TaskService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.workflow.LeaseService/Heartbeat + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.QueueService/Heartbeat + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.StateService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.QueueService/StreamLeases + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.workflow.LeaseService/StreamLeases + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + + - path: /flyteidl2.workflow.RunLogsService/TailLogs + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.RunService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.task.TaskService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.workflow.QueueService/Heartbeat + pathType: ImplementationSpecific + 
backend: + service: + name: queue + port: + name: grpc + - path: /flyteidl2.workflow.StateService/Watch* + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /flyteidl2.workflow.QueueService/StreamLeases + pathType: ImplementationSpecific + backend: + service: + name: queue + port: + name: grpc + - path: /cloudidl.logs.LogsService/TailTaskExecutionLogs + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.workspace.WorkspaceInstanceService/WatchWorkspaceInstances + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.app.AppService/Watch + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.app.AppService/Lease + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /cloudidl.app.AppLogsService/TailLogs + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /cloudidl.app.ReplicaService/WatchReplicas + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.app.AppService/Watch + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.app.AppService/Lease + pathType: ImplementationSpecific + backend: + service: + name: executions + port: + name: grpc + - path: /flyteidl2.app.AppLogsService/TailLogs + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc + - path: /flyteidl2.app.ReplicaService/WatchReplicas + pathType: ImplementationSpecific + backend: + service: + name: dataproxy + port: + name: grpc +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane + namespace: union + annotations: + 
nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). + # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + # Port 87 in FlyteAdmin maps to the redoc container. 
+ - path: /openapi + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: redoc + - path: /healthcheck + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /healthz + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /me + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + # Port 87 in FlyteAdmin maps to the redoc container. + - path: /openapi/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: redoc + - path: /.well-known + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /.well-known/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /login + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /login/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /logout + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /logout/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /callback + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /callback/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /config + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /config/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /oauth2/* + pathType: ImplementationSpecific + backend: + 
service: + name: flyteadmin + port: + name: http + - path: /auth + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /auth/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: http + - path: /enqueue_metronome_request/v1 + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http + - path: /enqueue_metronome_request/v1/* + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http + - path: /enqueue_stripe_request/v1 + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http + - path: /enqueue_stripe_request/v1/* + pathType: ImplementationSpecific + backend: + service: + name: usage + port: + name: http +--- +# Source: controlplane/templates/flyte-core-app.yaml +# Certain ingress controllers like nginx cannot serve HTTP 1 and GRPC with a single ingress because GRPC can only +# enabled on the ingress object, not on backend services (GRPC annotation is set on the ingress, not on the services). +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-grpc + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). 
+ # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/backend-protocol: GRPC +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + # NOTE: Port 81 in flyteadmin is the GRPC server port for FlyteAdmin. 
+ - path: /flyteidl.service.HealthService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.HealthService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.AuthMetadataService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl.service.AuthMetadataService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl2.auth.AuthMetadataService + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc + - path: /flyteidl2.auth.AuthMetadataService/* + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-grpc-streaming + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). 
+ # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/backend-protocol: GRPC +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: /flyteidl.service.WatchService/WatchExecutionStatusUpdates + pathType: ImplementationSpecific + backend: + service: + name: flyteadmin + port: + name: grpc +--- +# Source: controlplane/templates/flyte-core-app.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: controlplane-console-protected + namespace: union + annotations: + nginx.ingress.kubernetes.io/app-root: /v2 + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/limit-rps: "100" + nginx.ingress.kubernetes.io/proxy-body-size: 6m + nginx.ingress.kubernetes.io/proxy-buffer-size: 32k + nginx.ingress.kubernetes.io/proxy-buffers: 4 32k + nginx.ingress.kubernetes.io/proxy-cookie-domain: ~^ .$host + nginx.ingress.kubernetes.io/server-snippet: | + client_header_timeout 604800; + client_body_timeout 604800; + # Increasing the default configuration from + # client_header_buffer_size 1k; + # 
large_client_header_buffers 4 8k; + # to default of 16k and 32k for large buffer sizes. These sizes are chosen as a short term mediation until we can collect data to reason + # about expected header sizs (PE-1101). + # Historically, we have seen is with the previous 8k max buffer size , the auth endpoint of /me would throw 400 Bad request and due to this ingress controller + # threw a 500 as it doesn't expect this status code on auth request expected range : 200 <= authcall.status(i.e status of /me call) <=300 + # Code link for ref : https://github.com/nginx/nginx/blob/e734df6664e70f118ca3140bcef6d4f1750fa8fa/src/http/modules/ngx_http_auth_request_module.c#L170-L179 + # Now the main reason we have seen 400 bad request is large size of the cookies which contribute to the header size. + # We should keep reducing the size of what headers are being sent meanwhile we increase this size to mitigate the long header issue. + client_header_buffer_size 16k; + large_client_header_buffers 64 32k; + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/auth-cache-key: $http_flyte_authorization$http_cookie + nginx.ingress.kubernetes.io/auth-response-headers: Set-Cookie,X-User-Subject,X-User-Claim-Identitytype,X-User-Claim-Preferred-Username,X-User-Token + nginx.ingress.kubernetes.io/auth-signin: https://$host/login?redirect_url=$escaped_request_uri + nginx.ingress.kubernetes.io/auth-url: https://$host/me + nginx.org/websocket-services: dataproxy-service +spec: + ingressClassName: "controlplane" + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret + rules: + - host: fake-host.domain + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + # NOTE: If you change this, you must update the BASE_URL value in flyteconsole.yaml + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /console/* + 
pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /dashboard + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /dashboard/* + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /resources + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /resources/* + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /cost + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /cost/* + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /loading + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /loading/* + pathType: ImplementationSpecific + backend: + service: + name: flyteconsole + port: + name: http + - path: /v2 + pathType: ImplementationSpecific + backend: + service: + name: unionconsole + port: + name: http + - path: /v2/* + pathType: ImplementationSpecific + backend: + service: + name: unionconsole + port: + name: http +--- +# Source: controlplane/charts/scylla-operator/templates/validatingwebhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: scylla-operator/scylla-operator-serving-cert + name: scylla-operator +webhooks: +- name: webhook.scylla.scylladb.com + clientConfig: + service: + name: scylla-operator-webhook + namespace: scylla-operator + path: /validate + admissionReviewVersions: + - v1 + sideEffects: None + failurePolicy: Fail + rules: + - apiGroups: + - scylla.scylladb.com + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scyllaclusters + - apiGroups: + - scylla.scylladb.com + apiVersions: + 
- v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - nodeconfigs + - scyllaoperatorconfigs + - scylladbdatacenters + - scylladbclusters + - scylladbmanagerclusterregistrations + - scylladbmanagertasks +--- +# Source: controlplane/templates/secret.yaml +--- +--- +# Source: controlplane/templates/secret.yaml +--- +--- +# Source: controlplane/templates/secret.yaml +--- +--- +# Source: controlplane/templates/secret.yaml +--- +--- +# Source: controlplane/charts/scylla-operator/templates/certificate.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: scylla-operator-serving-cert + namespace: scylla-operator +spec: + dnsNames: + - scylla-operator-webhook.scylla-operator.svc + issuerRef: + kind: Issuer + name: scylla-operator-selfsigned-issuer + secretName: scylla-operator-serving-cert +--- +# Source: controlplane/charts/scylla-operator/templates/issuer.yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: scylla-operator-selfsigned-issuer + namespace: scylla-operator +spec: + selfSigned: {} +--- +# Source: controlplane/charts/scylla/templates/scyllacluster.yaml +apiVersion: scylla.scylladb.com/v1 +kind: ScyllaCluster +metadata: + name: scylla + namespace: union +spec: + version: 2025.1.5 + agentVersion: 3.5.1@sha256:d1b57d08b9949c8faad2048fdf4dc7c502dae81da856c3c6b3a77dd347d5c7fc + repository: scylladb/scylla + agentRepository: scylladb/scylla-manager-agent + developerMode: true + sysctls: + - fs.aio-max-nr=30000000 + datacenter: + name: dc1 + racks: + - agentResources: + requests: + cpu: 50m + memory: 10M + members: 3 + name: rack1 + placement: + nodeAffinity: {} + tolerations: [] + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + storage: + capacity: 100Gi + storageClassName: scylladb diff --git a/tests/generated/controlplane.userclouds.yaml b/tests/generated/controlplane.userclouds.yaml index c1509b4f..467060e0 100644 --- a/tests/generated/controlplane.userclouds.yaml +++ 
b/tests/generated/controlplane.userclouds.yaml @@ -428,6 +428,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cloudEvents: enable: false connection: @@ -556,6 +557,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache-server: grpcPort: 8089 grpcServerReflection: true @@ -618,6 +620,7 @@ data: - authorization - flyte-authorization type: Noop + useExternalIdentity: 'false' cache: identity: enabled: false @@ -674,6 +677,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -745,6 +749,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -804,6 +809,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -884,6 +890,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -949,6 +956,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' cache: identity: enabled: false @@ -1014,6 +1022,7 @@ data: host: dns:///authorizer.union.svc.cluster.local:80 insecure: true type: Authorizer + useExternalIdentity: 'false' billing: enable: false cache: @@ -5924,7 +5933,7 @@ spec: template: metadata: annotations: - configChecksum: "fc871cf99064ca6139c9fcc466301e09c5e563224da119eee7917619533d186" + configChecksum: "dc3dc4f3816ef7148d98e27dcc7defab7a44bd71f5b0f04b7ea159f820ee7a5" kubectl.kubernetes.io/default-container: flyteadmin labels: app.kubernetes.io/name: flyteadmin @@ -6303,7 +6312,7 @@ spec: template: metadata: annotations: - configChecksum: 
"834100fee446d8308f7d12fe24d7a68755250921e563db9c09155da8a5826e8" + configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" diff --git a/tests/values/controlplane.external-authz.yaml b/tests/values/controlplane.external-authz.yaml new file mode 100644 index 00000000..0e184b3d --- /dev/null +++ b/tests/values/controlplane.external-authz.yaml @@ -0,0 +1,48 @@ +global: + INTERNAL_CLIENT_ID: "test-internal-client-id" + AUTH_TOKEN_URL: "https://test.example.com/oauth2/v1/token" + USE_EXTERNAL_IDENTITY: "true" + +dbHost: "db-instance-url" +dbName: "dbName" +dbUser: "dbUser" +dbPass: "dbPass" +bucketName: "bucketName" +artifactsBucketName: "artifactsBucketName" + +configMap: + connection: + environment: staging + region: us-east-2 + rootTenantURLPattern: dns:///fake-host.domain +controlplane: + enabled: true +ingress: + host: fake-host.domain + tls: + - hosts: + - fake-host.domain + secretName: fake-host-tls-secret +flyte: + common: + ingress: + tls: + secretName: fake-host-tls-secret + host: fake-host.domain + configmap: + admin: + admin: + endpoint: dns:///fake-host.domain + insecure: false + +# External authorization — customer-provided gRPC server +services: + authorizer: + configMap: + authorizer: + type: "External" + externalClient: + grpcConfig: + host: "dns:///my-authz-server.default.svc.cluster.local:50051" + insecure: true + failOpen: false From e003509a000d3eef177616ef24d8579e5c9bcd2c Mon Sep 17 00:00:00 2001 From: Michael Hotan Date: Fri, 3 Apr 2026 13:48:05 +1100 Subject: [PATCH 4/4] Release 2026.4.0 (#321) * Release 2026.4.0 Bump chart version and appVersion to 2026.4.0. 
Co-Authored-By: Claude Opus 4.6 (1M context) * Bump appVersion to 2026.4.1 Co-Authored-By: Claude Opus 4.6 (1M context) * Bump appVersion to 2026.4.2 Co-Authored-By: Claude Opus 4.6 (1M context) --------- Co-authored-by: Claude Opus 4.6 (1M context) --- charts/controlplane/Chart.yaml | 4 +- charts/dataplane-crds/Chart.yaml | 2 +- charts/dataplane/Chart.yaml | 4 +- charts/sandbox/Chart.yaml | 2 +- .../controlplane.aws.billing-enable.yaml | 172 +++++++++--------- tests/generated/controlplane.aws.yaml | 172 +++++++++--------- .../controlplane.external-authz.yaml | 172 +++++++++--------- tests/generated/controlplane.userclouds.yaml | 172 +++++++++--------- .../dataplane.additional-podlabels.yaml | 42 ++--- .../dataplane.additional-templates.yaml | 42 ++--- .../generated/dataplane.aws.eks-automode.yaml | 42 ++--- .../generated/dataplane.aws.with-ingress.yaml | 42 ++--- tests/generated/dataplane.aws.yaml | 42 ++--- ...dataplane.azure-custom-storage-prefix.yaml | 42 ++--- tests/generated/dataplane.azure.yaml | 42 ++--- tests/generated/dataplane.cost.yaml | 42 ++--- tests/generated/dataplane.dcgm-exporter.yaml | 42 ++--- .../generated/dataplane.fully-selfhosted.yaml | 40 ++-- tests/generated/dataplane.low-priv.yaml | 40 ++-- tests/generated/dataplane.monitoring.yaml | 42 ++--- tests/generated/dataplane.nodeobserver.yaml | 44 ++--- tests/generated/dataplane.oci.yaml | 42 ++--- 22 files changed, 643 insertions(+), 643 deletions(-) diff --git a/charts/controlplane/Chart.yaml b/charts/controlplane/Chart.yaml index 36df4052..99c436c9 100644 --- a/charts/controlplane/Chart.yaml +++ b/charts/controlplane/Chart.yaml @@ -3,8 +3,8 @@ name: controlplane description: Deploys the Union controlplane components to onboard a kubernetes cluster to the Union Cloud. 
type: application icon: https://i.ibb.co/JxfDQsL/Union-Symbol-yellow-2.png -version: 2026.3.12 -appVersion: 2026.3.9 +version: 2026.4.0 +appVersion: 2026.4.2 kubeVersion: '>= 1.28.0-0' dependencies: - name: flyte-core diff --git a/charts/dataplane-crds/Chart.yaml b/charts/dataplane-crds/Chart.yaml index deae6b93..b5d635ee 100644 --- a/charts/dataplane-crds/Chart.yaml +++ b/charts/dataplane-crds/Chart.yaml @@ -3,7 +3,7 @@ name: dataplane-crds description: Deploys the Union dataplane CRDs. type: application icon: https://i.ibb.co/JxfDQsL/Union-Symbol-yellow-2.png -version: 2026.3.12 +version: 2026.4.0 appVersion: 2026.3.6 kubeVersion: '>= 1.28.0-0' dependencies: diff --git a/charts/dataplane/Chart.yaml b/charts/dataplane/Chart.yaml index 1e8fbc84..56396e87 100644 --- a/charts/dataplane/Chart.yaml +++ b/charts/dataplane/Chart.yaml @@ -3,8 +3,8 @@ name: dataplane description: Deploys the Union dataplane components to onboard a kubernetes cluster to the Union Cloud. type: application icon: https://i.ibb.co/JxfDQsL/Union-Symbol-yellow-2.png -version: 2026.3.12 -appVersion: 2026.3.9 +version: 2026.4.0 +appVersion: 2026.4.2 kubeVersion: '>= 1.28.0-0' dependencies: - name: kube-state-metrics diff --git a/charts/sandbox/Chart.yaml b/charts/sandbox/Chart.yaml index b4f87f99..22be3d4b 100644 --- a/charts/sandbox/Chart.yaml +++ b/charts/sandbox/Chart.yaml @@ -3,6 +3,6 @@ name: sandbox description: Deploys extras for sandbox testing. 
type: application icon: https://i.ibb.co/JxfDQsL/Union-Symbol-yellow-2.png -version: 2026.3.12 +version: 2026.4.0 appVersion: 2026.3.6 kubeVersion: '>= 1.28.0' diff --git a/tests/generated/controlplane.aws.billing-enable.yaml b/tests/generated/controlplane.aws.billing-enable.yaml index 48412416..c6157d42 100644 --- a/tests/generated/controlplane.aws.billing-enable.yaml +++ b/tests/generated/controlplane.aws.billing-enable.yaml @@ -37,10 +37,10 @@ kind: PodDisruptionBudget metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: minAvailable: "33%" @@ -213,7 +213,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/console/serviceaccount.yaml @@ -222,10 +222,10 @@ kind: ServiceAccount metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -235,10 +235,10 @@ kind: ServiceAccount metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -247,10 +247,10 @@ kind: ServiceAccount metadata: name: cluster labels: - 
helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -259,10 +259,10 @@ kind: ServiceAccount metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -271,10 +271,10 @@ kind: ServiceAccount metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -283,10 +283,10 @@ kind: ServiceAccount metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -295,10 +295,10 @@ kind: ServiceAccount metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -307,10 +307,10 @@ kind: ServiceAccount metadata: name: 
usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/charts/flyte/templates/admin/secret.yaml @@ -531,7 +531,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm data: db.yaml: | @@ -599,10 +599,10 @@ kind: ConfigMap metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -660,10 +660,10 @@ kind: ConfigMap metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -732,10 +732,10 @@ kind: ConfigMap metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -792,10 +792,10 @@ kind: ConfigMap metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" 
app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -873,10 +873,10 @@ kind: ConfigMap metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -939,10 +939,10 @@ kind: ConfigMap metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -1005,10 +1005,10 @@ kind: ConfigMap metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -5475,7 +5475,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm rules: - apiGroups: @@ -5506,7 +5506,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io @@ -5604,7 +5604,7 @@ metadata: platform.union.ai/prometheus-group: "union-services" app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5631,10 +5631,10 @@ metadata: 
name: unionconsole labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5659,10 +5659,10 @@ metadata: name: authorizer labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5698,10 +5698,10 @@ metadata: name: cluster labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5737,10 +5737,10 @@ metadata: name: dataproxy labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5772,10 +5772,10 @@ metadata: name: executions labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5807,10 
+5807,10 @@ metadata: name: queue labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5842,10 +5842,10 @@ metadata: name: run-scheduler labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5877,10 +5877,10 @@ metadata: name: usage labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -6301,7 +6301,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6312,14 +6312,14 @@ spec: template: metadata: annotations: - configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" + configChecksum: "01508c809611baccb3c60c4384e131ef74857378fa3b630633c8541d232d03e" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: securityContext: @@ -6411,10 +6411,10 @@ 
kind: Deployment metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: strategy: @@ -6451,7 +6451,7 @@ spec: capabilities: drop: - ALL - image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.3.9" + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.4.2" imagePullPolicy: IfNotPresent ports: - name: http @@ -6475,10 +6475,10 @@ kind: Deployment metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6514,7 +6514,7 @@ spec: name: authorizer containers: - name: authorizer - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - authorizer @@ -6588,10 +6588,10 @@ kind: Deployment metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6627,7 +6627,7 @@ spec: name: cluster initContainers: - name: cluster-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6643,7 +6643,7 @@ spec: mountPath: /etc/config/ containers: - name: cluster 
- image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6717,10 +6717,10 @@ kind: Deployment metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6756,7 +6756,7 @@ spec: name: dataproxy containers: - name: dataproxy - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - dataproxy @@ -6827,10 +6827,10 @@ kind: Deployment metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6866,7 +6866,7 @@ spec: name: executions initContainers: - name: executions-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6882,7 +6882,7 @@ spec: mountPath: /etc/config/ containers: - name: executions - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6953,10 +6953,10 @@ kind: Deployment metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue 
app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6993,7 +6993,7 @@ spec: name: queue initContainers: - name: queue-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7009,7 +7009,7 @@ spec: mountPath: /etc/config/ containers: - name: queue - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7080,10 +7080,10 @@ kind: Deployment metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7119,7 +7119,7 @@ spec: name: run-scheduler initContainers: - name: run-scheduler-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7135,7 +7135,7 @@ spec: mountPath: /etc/config/ containers: - name: run-scheduler - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7207,10 +7207,10 @@ kind: Deployment metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + 
app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7246,7 +7246,7 @@ spec: name: usage containers: - name: usage - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - usage @@ -7353,10 +7353,10 @@ kind: HorizontalPodAutoscaler metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: scaleTargetRef: diff --git a/tests/generated/controlplane.aws.yaml b/tests/generated/controlplane.aws.yaml index cc1a528f..3e58b0aa 100644 --- a/tests/generated/controlplane.aws.yaml +++ b/tests/generated/controlplane.aws.yaml @@ -37,10 +37,10 @@ kind: PodDisruptionBudget metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: minAvailable: "33%" @@ -213,7 +213,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/console/serviceaccount.yaml @@ -222,10 +222,10 @@ kind: ServiceAccount metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: 
controlplane/templates/serviceaccount.yaml @@ -235,10 +235,10 @@ kind: ServiceAccount metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -247,10 +247,10 @@ kind: ServiceAccount metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -259,10 +259,10 @@ kind: ServiceAccount metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -271,10 +271,10 @@ kind: ServiceAccount metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -283,10 +283,10 @@ kind: ServiceAccount metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # 
Source: controlplane/templates/serviceaccount.yaml @@ -295,10 +295,10 @@ kind: ServiceAccount metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -307,10 +307,10 @@ kind: ServiceAccount metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/charts/flyte/templates/admin/secret.yaml @@ -531,7 +531,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm data: db.yaml: | @@ -599,10 +599,10 @@ kind: ConfigMap metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -660,10 +660,10 @@ kind: ConfigMap metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -732,10 +732,10 @@ kind: ConfigMap metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 
app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -792,10 +792,10 @@ kind: ConfigMap metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -873,10 +873,10 @@ kind: ConfigMap metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -939,10 +939,10 @@ kind: ConfigMap metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -1005,10 +1005,10 @@ kind: ConfigMap metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -5475,7 +5475,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm rules: - apiGroups: @@ -5506,7 +5506,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin 
app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io @@ -5604,7 +5604,7 @@ metadata: platform.union.ai/prometheus-group: "union-services" app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5631,10 +5631,10 @@ metadata: name: unionconsole labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5659,10 +5659,10 @@ metadata: name: authorizer labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5698,10 +5698,10 @@ metadata: name: cluster labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5737,10 +5737,10 @@ metadata: name: dataproxy labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5772,10 +5772,10 @@ metadata: name: executions labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5807,10 +5807,10 @@ metadata: name: queue labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5842,10 +5842,10 @@ metadata: name: run-scheduler labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5877,10 +5877,10 @@ metadata: name: usage labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -6301,7 +6301,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6312,7 
+6312,7 @@ spec: template: metadata: annotations: - configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" + configChecksum: "01508c809611baccb3c60c4384e131ef74857378fa3b630633c8541d232d03e" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" @@ -6320,7 +6320,7 @@ spec: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: securityContext: @@ -6412,10 +6412,10 @@ kind: Deployment metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: strategy: @@ -6453,7 +6453,7 @@ spec: capabilities: drop: - ALL - image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.3.9" + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.4.2" imagePullPolicy: IfNotPresent ports: - name: http @@ -6477,10 +6477,10 @@ kind: Deployment metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6517,7 +6517,7 @@ spec: name: authorizer containers: - name: authorizer - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - authorizer @@ -6591,10 +6591,10 @@ kind: Deployment metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 
app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6631,7 +6631,7 @@ spec: name: cluster initContainers: - name: cluster-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6647,7 +6647,7 @@ spec: mountPath: /etc/config/ containers: - name: cluster - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6721,10 +6721,10 @@ kind: Deployment metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6761,7 +6761,7 @@ spec: name: dataproxy containers: - name: dataproxy - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - dataproxy @@ -6832,10 +6832,10 @@ kind: Deployment metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6872,7 +6872,7 @@ spec: name: executions initContainers: - name: executions-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 
643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6888,7 +6888,7 @@ spec: mountPath: /etc/config/ containers: - name: executions - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6959,10 +6959,10 @@ kind: Deployment metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -7000,7 +7000,7 @@ spec: name: queue initContainers: - name: queue-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7016,7 +7016,7 @@ spec: mountPath: /etc/config/ containers: - name: queue - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7087,10 +7087,10 @@ kind: Deployment metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7127,7 +7127,7 @@ spec: name: run-scheduler initContainers: - name: run-scheduler-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent 
args: - cloudpropeller @@ -7143,7 +7143,7 @@ spec: mountPath: /etc/config/ containers: - name: run-scheduler - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7215,10 +7215,10 @@ kind: Deployment metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7255,7 +7255,7 @@ spec: name: usage containers: - name: usage - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - usage @@ -7356,10 +7356,10 @@ kind: HorizontalPodAutoscaler metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: scaleTargetRef: diff --git a/tests/generated/controlplane.external-authz.yaml b/tests/generated/controlplane.external-authz.yaml index 21337025..dfc90c04 100644 --- a/tests/generated/controlplane.external-authz.yaml +++ b/tests/generated/controlplane.external-authz.yaml @@ -37,10 +37,10 @@ kind: PodDisruptionBudget metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: minAvailable: "33%" @@ -211,7 +211,7 @@ metadata: 
labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/console/serviceaccount.yaml @@ -220,10 +220,10 @@ kind: ServiceAccount metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -233,10 +233,10 @@ kind: ServiceAccount metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -245,10 +245,10 @@ kind: ServiceAccount metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -257,10 +257,10 @@ kind: ServiceAccount metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -269,10 +269,10 @@ kind: ServiceAccount metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 
+ helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -281,10 +281,10 @@ kind: ServiceAccount metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -293,10 +293,10 @@ kind: ServiceAccount metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -305,10 +305,10 @@ kind: ServiceAccount metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/charts/flyte/templates/admin/secret.yaml @@ -529,7 +529,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm data: db.yaml: | @@ -597,10 +597,10 @@ kind: ConfigMap metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -662,10 +662,10 @@ kind: ConfigMap metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -734,10 +734,10 @@ kind: ConfigMap metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -794,10 +794,10 @@ kind: ConfigMap metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -875,10 +875,10 @@ kind: ConfigMap metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -941,10 +941,10 @@ kind: ConfigMap metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -1007,10 +1007,10 @@ kind: ConfigMap metadata: name: 
usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -5477,7 +5477,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm rules: - apiGroups: @@ -5508,7 +5508,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io @@ -5606,7 +5606,7 @@ metadata: platform.union.ai/prometheus-group: "union-services" app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5633,10 +5633,10 @@ metadata: name: unionconsole labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5661,10 +5661,10 @@ metadata: name: authorizer labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5700,10 +5700,10 @@ metadata: name: cluster labels: 
platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5739,10 +5739,10 @@ metadata: name: dataproxy labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5774,10 +5774,10 @@ metadata: name: executions labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5809,10 +5809,10 @@ metadata: name: queue labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5844,10 +5844,10 @@ metadata: name: run-scheduler labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5879,10 +5879,10 @@ metadata: name: usage 
labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -6303,7 +6303,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6314,14 +6314,14 @@ spec: template: metadata: annotations: - configChecksum: "7eede7c92a6d230760dd9b2dd3404831ea5bc338b34159ebc5290cdbf18b560" + configChecksum: "94b611586b268a0c61829ad58cb13c0a84760e904f9556ce7680b2ba9a3ef48" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: securityContext: @@ -6413,10 +6413,10 @@ kind: Deployment metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: strategy: @@ -6453,7 +6453,7 @@ spec: capabilities: drop: - ALL - image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.3.9" + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.4.2" imagePullPolicy: IfNotPresent ports: - name: http @@ -6477,10 +6477,10 @@ kind: Deployment metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: 
release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6516,7 +6516,7 @@ spec: name: authorizer containers: - name: authorizer - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - authorizer @@ -6590,10 +6590,10 @@ kind: Deployment metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6629,7 +6629,7 @@ spec: name: cluster initContainers: - name: cluster-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6645,7 +6645,7 @@ spec: mountPath: /etc/config/ containers: - name: cluster - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6719,10 +6719,10 @@ kind: Deployment metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6758,7 +6758,7 @@ spec: name: dataproxy containers: - name: dataproxy - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: 
IfNotPresent args: - dataproxy @@ -6829,10 +6829,10 @@ kind: Deployment metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6868,7 +6868,7 @@ spec: name: executions initContainers: - name: executions-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6884,7 +6884,7 @@ spec: mountPath: /etc/config/ containers: - name: executions - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6955,10 +6955,10 @@ kind: Deployment metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6995,7 +6995,7 @@ spec: name: queue initContainers: - name: queue-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7011,7 +7011,7 @@ spec: mountPath: /etc/config/ containers: - name: queue - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7082,10 +7082,10 @@ kind: Deployment metadata: name: run-scheduler labels: - 
helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7121,7 +7121,7 @@ spec: name: run-scheduler initContainers: - name: run-scheduler-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7137,7 +7137,7 @@ spec: mountPath: /etc/config/ containers: - name: run-scheduler - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7209,10 +7209,10 @@ kind: Deployment metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7248,7 +7248,7 @@ spec: name: usage containers: - name: usage - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - usage @@ -7355,10 +7355,10 @@ kind: HorizontalPodAutoscaler metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: scaleTargetRef: diff --git a/tests/generated/controlplane.userclouds.yaml 
b/tests/generated/controlplane.userclouds.yaml index 467060e0..347cf5f0 100644 --- a/tests/generated/controlplane.userclouds.yaml +++ b/tests/generated/controlplane.userclouds.yaml @@ -37,10 +37,10 @@ kind: PodDisruptionBudget metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: minAvailable: "33%" @@ -213,7 +213,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/console/serviceaccount.yaml @@ -222,10 +222,10 @@ kind: ServiceAccount metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -235,10 +235,10 @@ kind: ServiceAccount metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -247,10 +247,10 @@ kind: ServiceAccount metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" 
app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -259,10 +259,10 @@ kind: ServiceAccount metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -271,10 +271,10 @@ kind: ServiceAccount metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -283,10 +283,10 @@ kind: ServiceAccount metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -295,10 +295,10 @@ kind: ServiceAccount metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/templates/serviceaccount.yaml @@ -307,10 +307,10 @@ kind: ServiceAccount metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: 
"2026.4.2" app.kubernetes.io/managed-by: Helm --- # Source: controlplane/charts/flyte/templates/admin/secret.yaml @@ -531,7 +531,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm data: db.yaml: | @@ -599,10 +599,10 @@ kind: ConfigMap metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -660,10 +660,10 @@ kind: ConfigMap metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -732,10 +732,10 @@ kind: ConfigMap metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -792,10 +792,10 @@ kind: ConfigMap metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -873,10 +873,10 @@ kind: ConfigMap metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue 
app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -939,10 +939,10 @@ kind: ConfigMap metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -1005,10 +1005,10 @@ kind: ConfigMap metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm data: config.yaml: | @@ -5475,7 +5475,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm rules: - apiGroups: @@ -5506,7 +5506,7 @@ metadata: labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 #app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io @@ -5604,7 +5604,7 @@ metadata: platform.union.ai/prometheus-group: "union-services" app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5631,10 +5631,10 @@ metadata: name: unionconsole labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name 
- app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5659,10 +5659,10 @@ metadata: name: authorizer labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5698,10 +5698,10 @@ metadata: name: cluster labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5737,10 +5737,10 @@ metadata: name: dataproxy labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5772,10 +5772,10 @@ metadata: name: executions labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5807,10 +5807,10 @@ metadata: name: queue labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: 
release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5842,10 +5842,10 @@ metadata: name: run-scheduler labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -5877,10 +5877,10 @@ metadata: name: usage labels: platform.union.ai/prometheus-group: "union-services" - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -6301,7 +6301,7 @@ metadata: labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6312,14 +6312,14 @@ spec: template: metadata: annotations: - configChecksum: "2e0dd8aaa6e10c3fa6adb425cef7da9523898c3aa6de3c297f60838d7142d31" + configChecksum: "01508c809611baccb3c60c4384e131ef74857378fa3b630633c8541d232d03e" linkerd.io/inject: disabled prometheus.io/path: /metrics prometheus.io/port: "10254" labels: app.kubernetes.io/name: cacheservice app.kubernetes.io/instance: release-name - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/managed-by: Helm spec: securityContext: @@ -6411,10 +6411,10 @@ kind: Deployment metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: strategy: @@ -6451,7 +6451,7 @@ spec: capabilities: drop: - ALL - image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.3.9" + image: "643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/unionconsole:2026.4.2" imagePullPolicy: IfNotPresent ports: - name: http @@ -6475,10 +6475,10 @@ kind: Deployment metadata: name: authorizer labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: authorizer app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6514,7 +6514,7 @@ spec: name: authorizer containers: - name: authorizer - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - authorizer @@ -6588,10 +6588,10 @@ kind: Deployment metadata: name: cluster labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: cluster app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6627,7 +6627,7 @@ spec: name: cluster initContainers: - name: cluster-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster @@ -6643,7 +6643,7 @@ spec: mountPath: /etc/config/ containers: - name: cluster - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudcluster 
@@ -6717,10 +6717,10 @@ kind: Deployment metadata: name: dataproxy labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: dataproxy app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6756,7 +6756,7 @@ spec: name: dataproxy containers: - name: dataproxy - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - dataproxy @@ -6827,10 +6827,10 @@ kind: Deployment metadata: name: executions labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: executions app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -6866,7 +6866,7 @@ spec: name: executions initContainers: - name: executions-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6882,7 +6882,7 @@ spec: mountPath: /etc/config/ containers: - name: executions - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -6953,10 +6953,10 @@ kind: Deployment metadata: name: queue labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: queue app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -6993,7 +6993,7 @@ spec: name: 
queue initContainers: - name: queue-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7009,7 +7009,7 @@ spec: mountPath: /etc/config/ containers: - name: queue - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - queue @@ -7080,10 +7080,10 @@ kind: Deployment metadata: name: run-scheduler labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: run-scheduler app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7119,7 +7119,7 @@ spec: name: run-scheduler initContainers: - name: run-scheduler-migrate - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7135,7 +7135,7 @@ spec: mountPath: /etc/config/ containers: - name: run-scheduler - image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - cloudpropeller @@ -7207,10 +7207,10 @@ kind: Deployment metadata: name: usage labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: usage app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: selector: @@ -7246,7 +7246,7 @@ spec: name: usage containers: - name: usage - image: 
643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.3.9 + image: 643379628101.dkr.ecr.us-east-1.amazonaws.com/union-cp/services:2026.4.2 imagePullPolicy: IfNotPresent args: - usage @@ -7347,10 +7347,10 @@ kind: HorizontalPodAutoscaler metadata: name: unionconsole labels: - helm.sh/chart: controlplane-2026.3.12 + helm.sh/chart: controlplane-2026.4.0 app.kubernetes.io/name: unionconsole app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm spec: scaleTargetRef: diff --git a/tests/generated/dataplane.additional-podlabels.yaml b/tests/generated/dataplane.additional-podlabels.yaml index 0499593e..a4948404 100644 --- a/tests/generated/dataplane.additional-podlabels.yaml +++ b/tests/generated/dataplane.additional-podlabels.yaml @@ -130,10 +130,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2802,10 +2802,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4271,10 +4271,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: 
"2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4533,10 +4533,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4857,10 +4857,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5278,7 +5278,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5471,7 +5471,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5581,7 +5581,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5649,7 +5649,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: 
- cloudflared @@ -5716,7 +5716,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5790,10 +5790,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5806,7 +5806,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5891,7 +5891,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5946,7 +5946,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6085,7 +6085,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.additional-templates.yaml b/tests/generated/dataplane.additional-templates.yaml index 
ea769971..b001ac35 100644 --- a/tests/generated/dataplane.additional-templates.yaml +++ b/tests/generated/dataplane.additional-templates.yaml @@ -130,10 +130,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2851,10 +2851,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4330,10 +4330,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4592,10 +4592,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4916,10 +4916,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: 
release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5335,7 +5335,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5526,7 +5526,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5634,7 +5634,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5702,7 +5702,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5767,7 +5767,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5841,10 +5841,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ 
-5857,7 +5857,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5940,7 +5940,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5995,7 +5995,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6132,7 +6132,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.aws.eks-automode.yaml b/tests/generated/dataplane.aws.eks-automode.yaml index 36c92ade..10676f8a 100644 --- a/tests/generated/dataplane.aws.eks-automode.yaml +++ b/tests/generated/dataplane.aws.eks-automode.yaml @@ -167,10 +167,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2987,10 +2987,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 
app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4491,10 +4491,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4753,10 +4753,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -5158,10 +5158,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5830,7 +5830,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -6021,7 +6021,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -6129,7 +6129,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -6197,7 +6197,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -6262,7 +6262,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -6336,10 +6336,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -6352,7 +6352,7 @@ spec: template: metadata: annotations: - configChecksum: "9c6210cbfc32e7b47fc43cb966fbdb6a91e587724b75392c3bc30fcbc8db21c" + configChecksum: "7ac64d197409855c9e55448deb254a5a2d51fa10a5d86653db672ad6e181f69" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -6435,7 +6435,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6490,7 +6490,7 @@ spec: memory: 500Mi containers: - name: webhook - image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6627,7 +6627,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.aws.with-ingress.yaml b/tests/generated/dataplane.aws.with-ingress.yaml index 3e5825be..e4a08d6b 100644 --- a/tests/generated/dataplane.aws.with-ingress.yaml +++ b/tests/generated/dataplane.aws.with-ingress.yaml @@ -138,10 +138,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2814,10 +2814,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4283,10 +4283,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4545,10 +4545,10 @@ kind: 
ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4869,10 +4869,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5288,7 +5288,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5479,7 +5479,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5587,7 +5587,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5655,7 +5655,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5720,7 +5720,7 @@ spec: - name: operator securityContext: {} - image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5794,10 +5794,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5810,7 +5810,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5893,7 +5893,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5948,7 +5948,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6085,7 +6085,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.aws.yaml b/tests/generated/dataplane.aws.yaml index a3ef09df..d4575c64 100644 --- a/tests/generated/dataplane.aws.yaml +++ b/tests/generated/dataplane.aws.yaml @@ -167,10 +167,10 
@@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2944,10 +2944,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4427,10 +4427,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4689,10 +4689,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -5094,10 +5094,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5766,7 +5766,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5957,7 +5957,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -6065,7 +6065,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -6133,7 +6133,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -6198,7 +6198,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -6272,10 +6272,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -6288,7 +6288,7 @@ spec: template: metadata: annotations: - configChecksum: "9c6210cbfc32e7b47fc43cb966fbdb6a91e587724b75392c3bc30fcbc8db21c" + 
configChecksum: "7ac64d197409855c9e55448deb254a5a2d51fa10a5d86653db672ad6e181f69" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -6371,7 +6371,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6426,7 +6426,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6563,7 +6563,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.azure-custom-storage-prefix.yaml b/tests/generated/dataplane.azure-custom-storage-prefix.yaml index 8d31126c..8516492f 100644 --- a/tests/generated/dataplane.azure-custom-storage-prefix.yaml +++ b/tests/generated/dataplane.azure-custom-storage-prefix.yaml @@ -138,10 +138,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2861,10 +2861,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - 
app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4378,10 +4378,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4640,10 +4640,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4964,10 +4964,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5384,7 +5384,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5576,7 +5576,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5685,7 
+5685,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5753,7 +5753,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5819,7 +5819,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5893,10 +5893,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5909,7 +5909,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5993,7 +5993,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6048,7 +6048,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: 
"IfNotPresent" command: - flytepropeller @@ -6186,7 +6186,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.azure.yaml b/tests/generated/dataplane.azure.yaml index f36aab90..81acf14a 100644 --- a/tests/generated/dataplane.azure.yaml +++ b/tests/generated/dataplane.azure.yaml @@ -140,10 +140,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2863,10 +2863,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4380,10 +4380,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4642,10 +4642,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: 
release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4966,10 +4966,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5386,7 +5386,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5578,7 +5578,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5687,7 +5687,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5755,7 +5755,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5821,7 +5821,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ 
-5895,10 +5895,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5911,7 +5911,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5995,7 +5995,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6050,7 +6050,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6188,7 +6188,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.cost.yaml b/tests/generated/dataplane.cost.yaml index 7ff80142..ca10fa55 100644 --- a/tests/generated/dataplane.cost.yaml +++ b/tests/generated/dataplane.cost.yaml @@ -115,10 +115,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane 
app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2793,10 +2793,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4173,10 +4173,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4414,10 +4414,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4715,10 +4715,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5040,7 +5040,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: 
http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5231,7 +5231,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5339,7 +5339,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5407,7 +5407,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5472,7 +5472,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5546,10 +5546,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5562,7 +5562,7 @@ spec: template: metadata: annotations: - configChecksum: "fb0ca51551fcdbb3bcbaaf4371adb7d1bc14dff9c5233eab2ce01124b1a532b" + configChecksum: "e3d49725f610398508f2f7fcdbdb77501a9db64363e2dc42f437b0a4e7f40ff" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5645,7 +5645,7 
@@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5700,7 +5700,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5837,7 +5837,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.dcgm-exporter.yaml b/tests/generated/dataplane.dcgm-exporter.yaml index c66062a9..b729d61d 100644 --- a/tests/generated/dataplane.dcgm-exporter.yaml +++ b/tests/generated/dataplane.dcgm-exporter.yaml @@ -159,10 +159,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2931,10 +2931,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4416,10 +4416,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: 
dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4678,10 +4678,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -5083,10 +5083,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5623,7 +5623,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5814,7 +5814,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5922,7 +5922,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ 
-5990,7 +5990,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -6055,7 +6055,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -6129,10 +6129,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -6145,7 +6145,7 @@ spec: template: metadata: annotations: - configChecksum: "9c6210cbfc32e7b47fc43cb966fbdb6a91e587724b75392c3bc30fcbc8db21c" + configChecksum: "7ac64d197409855c9e55448deb254a5a2d51fa10a5d86653db672ad6e181f69" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -6228,7 +6228,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6283,7 +6283,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6420,7 +6420,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.fully-selfhosted.yaml b/tests/generated/dataplane.fully-selfhosted.yaml index f6a490e9..75360933 100644 --- a/tests/generated/dataplane.fully-selfhosted.yaml +++ b/tests/generated/dataplane.fully-selfhosted.yaml @@ -130,10 +130,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2818,10 +2818,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4297,10 +4297,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4559,10 +4559,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/component: prometheus roleRef: @@ -4883,10 +4883,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5302,7 +5302,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5482,7 +5482,7 @@ spec: name: executor containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5583,7 +5583,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5685,7 +5685,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5757,10 +5757,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5773,7 
+5773,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5856,7 +5856,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -5911,7 +5911,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6048,7 +6048,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.low-priv.yaml b/tests/generated/dataplane.low-priv.yaml index 0590906a..d604c6f2 100644 --- a/tests/generated/dataplane.low-priv.yaml +++ b/tests/generated/dataplane.low-priv.yaml @@ -69,10 +69,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2589,10 +2589,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane 
app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -3067,10 +3067,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -3135,10 +3135,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -3576,10 +3576,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -3914,7 +3914,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -4020,7 +4020,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -4088,7 
+4088,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -4153,7 +4153,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -4227,10 +4227,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -4243,7 +4243,7 @@ spec: template: metadata: annotations: - configChecksum: "f65e87244fecd45b82addff1a75add5446555fb5f5abfe77212c17e0c8d3ddb" + configChecksum: "1aa8d697931486bde6d435f0694d0f56fff0390f575764fd2dd74c49cfe5004" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -4326,7 +4326,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -4381,7 +4381,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -4517,7 +4517,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.monitoring.yaml b/tests/generated/dataplane.monitoring.yaml index 5a905bf9..7d8396b1 100644 --- a/tests/generated/dataplane.monitoring.yaml +++ b/tests/generated/dataplane.monitoring.yaml @@ -217,10 +217,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -3629,10 +3629,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -5440,10 +5440,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -5794,10 +5794,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ 
-6430,10 +6430,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -7395,7 +7395,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -7586,7 +7586,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -7694,7 +7694,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -7762,7 +7762,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -7827,7 +7827,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -7901,10 +7901,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane 
app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -7917,7 +7917,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -8000,7 +8000,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -8055,7 +8055,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -8192,7 +8192,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.nodeobserver.yaml b/tests/generated/dataplane.nodeobserver.yaml index b918de4c..54e614a0 100644 --- a/tests/generated/dataplane.nodeobserver.yaml +++ b/tests/generated/dataplane.nodeobserver.yaml @@ -137,10 +137,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/component: prometheus --- @@ -2825,10 +2825,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4313,10 +4313,10 @@ kind: ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4589,10 +4589,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4913,10 +4913,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5129,7 +5129,7 @@ spec: privileged: true runAsNonRoot: false runAsUser: 0 - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ 
-5432,7 +5432,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5623,7 +5623,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5731,7 +5731,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5799,7 +5799,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5864,7 +5864,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5938,10 +5938,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5954,7 +5954,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" 
labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -6037,7 +6037,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6092,7 +6092,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6229,7 +6229,7 @@ spec: value: http://union-operator-prometheus:80 - name: KNATIVE_PROXY_SERVICE_URL value: http://kourier-internal - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: diff --git a/tests/generated/dataplane.oci.yaml b/tests/generated/dataplane.oci.yaml index 747c0fc1..6c239916 100644 --- a/tests/generated/dataplane.oci.yaml +++ b/tests/generated/dataplane.oci.yaml @@ -130,10 +130,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus --- @@ -2837,10 +2837,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus data: @@ -4320,10 +4320,10 @@ kind: 
ClusterRole metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus rules: @@ -4582,10 +4582,10 @@ kind: ClusterRoleBinding metadata: name: union-operator-prometheus labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus roleRef: @@ -4906,10 +4906,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5331,7 +5331,7 @@ spec: value: "true" - name: SOME_NUMERIC_VAR value: "42" - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: sync-cluster-resources resources: @@ -5536,7 +5536,7 @@ spec: secretName: union-secret-auth containers: - name: executor - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent command: - executor @@ -5658,7 +5658,7 @@ spec: - name: operator-proxy securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: 
FallbackToLogsOnError resources: @@ -5732,7 +5732,7 @@ spec: - name: "tunnel" securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent args: - cloudflared @@ -5805,7 +5805,7 @@ spec: - name: operator securityContext: {} - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: IfNotPresent terminationMessagePolicy: FallbackToLogsOnError resources: @@ -5893,10 +5893,10 @@ metadata: name: union-operator-prometheus namespace: union labels: - helm.sh/chart: dataplane-2026.3.12 + helm.sh/chart: dataplane-2026.4.0 app.kubernetes.io/name: release-name-dataplane app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "2026.3.9" + app.kubernetes.io/version: "2026.4.2" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: prometheus spec: @@ -5909,7 +5909,7 @@ spec: template: metadata: annotations: - configChecksum: "9505483b28e45abfefda9a9791a7719382b61225386ddfbdfea71a459a1423e" + configChecksum: "826b07095069d3c6cb845e7cf51f5f88ecc0156e97ef4c0020c0ef9ee365b65" labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: release-name-dataplane @@ -5992,7 +5992,7 @@ spec: serviceAccountName: flytepropeller-webhook-system initContainers: - name: generate-secrets - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6053,7 +6053,7 @@ spec: memory: 500Mi containers: - name: webhook - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" command: - flytepropeller @@ -6210,7 +6210,7 @@ spec: value: "true" - name: SOME_NUMERIC_VAR value: "42" - image: "public.ecr.aws/p0i0a9q8/unionoperator:2026.3.9" + image: 
"public.ecr.aws/p0i0a9q8/unionoperator:2026.4.2" imagePullPolicy: "IfNotPresent" name: flytepropeller ports: