diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml
index c7a66d6ba..dbcfe674d 100644
--- a/.github/scripts/end2end/configs/zenko.yaml
+++ b/.github/scripts/end2end/configs/zenko.yaml
@@ -82,6 +82,10 @@ spec:
     enable: false
   scuba:
     replicas: 1
+    api:
+      replicas: 1
+      ingress:
+        hostname: ${ZENKO_SUR_INGRESS}
   management:
     provider: InCluster
     ui:
@@ -123,9 +127,6 @@ spec:
     azure:
       archiveTier: "hot"
       restoreTimeout: "15s"
-  scuba:
-    logging:
-      logLevel: debug
   ingress:
     workloadPlaneClass: 'nginx'
     controlPlaneClass: 'nginx-control-plane'
diff --git a/.github/scripts/end2end/configure-e2e-ctst.sh b/.github/scripts/end2end/configure-e2e-ctst.sh
index a5deb710b..e1439867e 100755
--- a/.github/scripts/end2end/configure-e2e-ctst.sh
+++ b/.github/scripts/end2end/configure-e2e-ctst.sh
@@ -17,7 +17,7 @@ UUID=${UUID%.*}
 UUID=${UUID:1}
 
 echo "127.0.0.1 iam.zenko.local ui.zenko.local s3-local-file.zenko.local keycloak.zenko.local \
-  sts.zenko.local management.zenko.local s3.zenko.local website.mywebsite.com" | sudo tee -a /etc/hosts
+  sts.zenko.local management.zenko.local s3.zenko.local website.mywebsite.com utilization.zenko.local" | sudo tee -a /etc/hosts
 
 # Add bucket notification target
 envsubst < ./configs/notification_destinations.yaml | kubectl apply -f -
diff --git a/.github/scripts/end2end/deploy-zenko.sh b/.github/scripts/end2end/deploy-zenko.sh
index ec6d31ab8..c7b5082a4 100755
--- a/.github/scripts/end2end/deploy-zenko.sh
+++ b/.github/scripts/end2end/deploy-zenko.sh
@@ -17,6 +17,7 @@ export ZENKO_STS_INGRESS=${ZENKO_STS_INGRESS:-'sts.zenko.local'}
 export ZENKO_MANAGEMENT_INGRESS=${ZENKO_MANAGEMENT_INGRESS:-'management.zenko.local'}
 export ZENKO_S3_INGRESS=${ZENKO_S3_INGRESS:-'s3.zenko.local'}
 export ZENKO_UI_INGRESS=${ZENKO_UI_INGRESS:-'ui.zenko.local'}
+export ZENKO_SUR_INGRESS=${ZENKO_SUR_INGRESS:-'utilization.zenko.local'}
 
 export BACKBEAT_LCC_CRON_RULE=${BACKBEAT_LCC_CRON_RULE:-'*/5 * * * * *'}
 
@@ -30,7 +31,8 @@ if [ ${ENABLE_KEYCLOAK_HTTPS} == 'true' ]; then
     - ${ZENKO_UI_INGRESS}
     - ${ZENKO_MANAGEMENT_INGRESS}
     - ${ZENKO_IAM_INGRESS}
-    - ${ZENKO_STS_INGRESS}"
+    - ${ZENKO_STS_INGRESS}
+    - ${ZENKO_SUR_INGRESS}"
 else
   export ZENKO_INGRESS_ANNOTATIONS="annotations:
     nginx.ingress.kubernetes.io/proxy-body-size: 0m"
diff --git a/.github/scripts/end2end/patch-coredns.sh b/.github/scripts/end2end/patch-coredns.sh
index b8b40fa37..b199ac978 100755
--- a/.github/scripts/end2end/patch-coredns.sh
+++ b/.github/scripts/end2end/patch-coredns.sh
@@ -24,6 +24,7 @@ corefile="
         rewrite name exact ui.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact management.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact s3.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
+        rewrite name exact utilization.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact sts.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact iam.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
         rewrite name exact shell-ui.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
diff --git a/.github/scripts/end2end/prepare-pra.sh b/.github/scripts/end2end/prepare-pra.sh
index 933448845..8214b73b0 100644
--- a/.github/scripts/end2end/prepare-pra.sh
+++ b/.github/scripts/end2end/prepare-pra.sh
@@ -14,6 +14,7 @@ echo 'ZENKO_STS_INGRESS="sts.dr.zenko.local"' >> "$GITHUB_ENV"
 echo 'ZENKO_MANAGEMENT_INGRESS="management.dr.zenko.local"' >> "$GITHUB_ENV"
 echo 'ZENKO_S3_INGRESS="s3.dr.zenko.local"' >> "$GITHUB_ENV"
 echo 'ZENKO_UI_INGRESS="ui.dr.zenko.local"' >> "$GITHUB_ENV"
+echo 'ZENKO_SUR_INGRESS="utilization.dr.zenko.local"' >> "$GITHUB_ENV"
 
 MONGODB_ROOT_USERNAME="${MONGODB_ROOT_USERNAME:-'root'}"
 MONGODB_ROOT_PASSWORD="${MONGODB_ROOT_PASSWORD:-'rootpass'}"
diff --git a/.github/scripts/end2end/run-e2e-ctst.sh b/.github/scripts/end2end/run-e2e-ctst.sh
index f336f01b2..2633b5859 100755
--- a/.github/scripts/end2end/run-e2e-ctst.sh
+++ b/.github/scripts/end2end/run-e2e-ctst.sh
@@ -74,6 +74,10 @@ BACKBEAT_API_PORT=$(kubectl get secret -l app.kubernetes.io/name=connector-cloud
 KAFKA_CLEANER_INTERVAL=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.kafkaCleaner.interval}')
 SORBETD_RESTORE_TIMEOUT=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.sorbet.server.azure.restoreTimeout}')
 
+# Utilization service
+UTILIZATION_SERVICE_HOST=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.scuba.api.ingress.hostname}')
+UTILIZATION_SERVICE_PORT="80"
+
 # Setting CTST world params
 WORLD_PARAMETERS="$(jq -c <
diff --git a/tests/ctst/common/utils.ts b/tests/ctst/common/utils.ts
--- a/tests/ctst/common/utils.ts
+++ b/tests/ctst/common/utils.ts
+export async function prepareMetricsScenarios(
+    world: Zenko,
+    scenarioConfiguration: ITestCaseHookParameter,
+    options: { versioning?: string, jobName?: string, jobNamespace?: string },
+): Promise<void> {
+    const { gherkinDocument, pickle } = scenarioConfiguration;
+    const featureName = gherkinDocument.feature?.name?.replace(/ /g, '-').toLowerCase() || 'metrics';
+    const filePath = `/tmp/${featureName}`;
+    let initiated = false;
+    let releaseLock: (() => Promise<void>) | false = false;
+    const output: Record<string, AWSCredentials> = {};
+
+    const {
+        versioning = '',
+        jobName = 'end2end-ops-count-items',
+        jobNamespace = `${featureName}-setup`,
+    } = options;
+
+    if (!fs.existsSync(filePath)) {
+        fs.writeFileSync(filePath, JSON.stringify({
+            ready: false,
+        }));
+    } else {
+        initiated = true;
+    }
+
+    if (!initiated) {
+        try {
+            releaseLock = await lockFile.lock(filePath, { stale: Constants.DEFAULT_TIMEOUT / 2 });
+        } catch (err) {
+            world.logger.error('Unable to acquire lock', { err });
+            releaseLock = false;
+        }
+    }
+
+    if (releaseLock) {
+        const scenarioIds = new Set<string>();
+
+        for (const scenario of gherkinDocument.feature?.children || []) {
+            for (const example of scenario.scenario?.examples || []) {
+                for (const values of example.tableBody || []) {
+                    const scenarioWithExampleID = hashStringAndKeepFirst20Characters(`${values.id}`);
+                    scenarioIds.add(scenarioWithExampleID);
+                }
+            }
+        }
+
+        for (const scenarioId of scenarioIds) {
+            await world.createAccount(scenarioId, true);
+            await createBucketWithConfiguration(world, scenarioId, versioning);
+            await putObject(world);
+            output[scenarioId] = Identity.getCurrentCredentials()!;
+        }
+
+        await createJobAndWaitForCompletion(world, jobName, jobNamespace);
+
+        await Utils.sleep(2000);
+        fs.writeFileSync(filePath, JSON.stringify({
+            ready: true,
+            ...output,
+        }));
+
+        await releaseLock();
+    } else {
+        while (!fs.existsSync(filePath)) {
+            await Utils.sleep(100);
+        }
+
+        let configuration: { ready: boolean } = JSON.parse(fs.readFileSync(filePath, 'utf8')) as { ready: boolean };
+        while (!configuration.ready) {
+            await Utils.sleep(100);
+            configuration = JSON.parse(fs.readFileSync(filePath, 'utf8')) as { ready: boolean };
+        }
+    }
+
+    const configuration: typeof output = JSON.parse(fs.readFileSync(filePath, 'utf8')) as typeof output;
+    const key = hashStringAndKeepFirst20Characters(`${pickle.astNodeIds[1]}`);
+    world.logger.debug('Scenario key', { key, from: `${pickle.astNodeIds[1]}`, configuration });
+
+    world.addToSaved('bucketName', key);
+    world.addToSaved('accountName', key);
+    world.addToSaved('accountNameForScenario', key);
+    world.addToSaved('metricsEnvironmentSetup', true);
+
+    if (configuration[key]) {
+        Identity.addIdentity(IdentityEnum.ACCOUNT, key, configuration[key], undefined, true, true);
+    }
+}
diff --git a/tests/ctst/features/cloudserverAuth.feature b/tests/ctst/features/cloudserverAuth.feature
index 959a9cbf0..a03eed2b0 100644
--- a/tests/ctst/features/cloudserverAuth.feature
+++ b/tests/ctst/features/cloudserverAuth.feature
@@ -5,9 +5,9 @@ Feature: AWS S3 Bucket operations
     @Cloudserver-Auth
     Scenario: Check Authentication on bucket object lock actions with Vault
         Given a IAM_USER type
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "CreateBucket" on "*"
-        And an IAM policy attached to the entity "user" with "" effect to perform "PutBucketObjectLockConfiguration" on "*"
-        And an IAM policy attached to the entity "user" with "" effect to perform "PutBucketVersioning" on "*"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "CreateBucket" on "arn:aws:s3:::*"
+        And an IAM policy attached to the entity "user" with "" effect to perform "s3" "PutBucketObjectLockConfiguration" on "arn:aws:s3:::*"
+        And an IAM policy attached to the entity "user" with "" effect to perform "s3" "PutBucketVersioning" on "arn:aws:s3:::*"
         When the user tries to perform CreateBucket
         Then it "" pass Vault authentication
 
@@ -24,9 +24,9 @@ Feature: AWS S3 Bucket operations
     Scenario: Check Authentication on bucket retention actions with Vault
         Given an existing bucket "" "without" versioning, "with" ObjectLock "GOVERNANCE" retention mode
         And a IAM_USER type
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "PutObject" on "*"
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "PutObjectRetention" on "*"
-        And an IAM policy attached to the entity "user" with "" effect to perform "BypassGovernanceRetention" on "*"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "PutObject" on "arn:aws:s3:::*"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "PutObjectRetention" on "arn:aws:s3:::*"
+        And an IAM policy attached to the entity "user" with "" effect to perform "s3" "BypassGovernanceRetention" on "arn:aws:s3:::*"
        And an object "" that "exists"
        When the user tries to perform PutObjectRetention "" bypass
        Then it "" pass Vault authentication
@@ -44,15 +44,15 @@ Feature: AWS S3 Bucket operations
     Scenario: Check Authentication on DeleteObjects with Vault
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "without" retention mode
         And a IAM_USER type
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "PutObject" on "*"
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "DeleteObject" on "<resource1>"
-        And an IAM policy attached to the entity "user" with "<allow>" effect to perform "DeleteObject" on "<resource2>"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "PutObject" on "arn:aws:s3:::*"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "DeleteObject" on "<resource1>"
+        And an IAM policy attached to the entity "user" with "<allow>" effect to perform "s3" "DeleteObject" on "<resource2>"
         And an object "<objName1>" that "exists"
         And an object "<objName2>" that "exists"
         When the user tries to perform DeleteObjects
         Then it "<should>" pass Vault authentication
 
         Examples:
-            | bucketName | objName1 | objName2 | resource1 | resource2 | allow | should |
-            | ca-do-bucket-1 | obj1 | obj2 | ca-do-bucket-1/obj1 | ca-do-bucket-1/obj2 | Allow | should |
-            | ca-do-bucket-2 | obj1 | obj2 | ca-do-bucket-2/obj1 | ca-do-bucket-2/obj2 | Deny | should not |
+            | bucketName | objName1 | objName2 | resource1 | resource2 | allow | should |
+            | ca-do-bucket-1 | obj1 | obj2 | arn:aws:s3:::ca-do-bucket-1/obj1 | arn:aws:s3:::ca-do-bucket-1/obj2 | Allow | should |
+            | ca-do-bucket-2 | obj1 | obj2 | arn:aws:s3:::ca-do-bucket-2/obj1 | arn:aws:s3:::ca-do-bucket-2/obj2 | Deny | should not |
diff --git a/tests/ctst/features/iam-policies/AssumeRole.feature b/tests/ctst/features/iam-policies/AssumeRole.feature
index 77d3ad95c..0b3a59401 100644
--- a/tests/ctst/features/iam-policies/AssumeRole.feature
+++ b/tests/ctst/features/iam-policies/AssumeRole.feature
@@ -27,22 +27,22 @@ Feature: IAM Policies for Assume Role Session Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
         And a "<ifCrossAccount>" AssumeRole user
-        And an IAM policy attached to the entity "role" with "Allow" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "role" with "Allow" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should be able to perform successfully the "<action>" action
 
         Examples:
-            | action | resource | bucketName | objectExists | objectName | ifCrossAccount |
-            | MetadataSearch | * | | does not exist | | |
-            | MetadataSearch | * | | does not exist | | cross account |
-            | GetObject | * | | exists | | |
-            | GetObject | * | | exists | | cross account |
-            | MetadataSearch | ar-md-bucket1 | ar-md-bucket1 | does not exist | | |
-            | MetadataSearch | ar-md-bucket2 | ar-md-bucket2 | does not exist | | cross account |
-            | GetObject | ar-go-bucket1/* | ar-go-bucket1 | exists | | |
-            | GetObject | ar-go-bucket2/* | ar-go-bucket2 | exists | | cross account |
-            | GetObject | ar-go-bucket3/go-object | ar-go-bucket3 | exists | go-object | |
-            | GetObject | ar-go-bucket4/go-object | ar-go-bucket4 | exists | go-object | cross account |
+            | action | resource | bucketName | objectExists | objectName | ifCrossAccount |
+            | MetadataSearch | arn:aws:s3:::* | | does not exist | | |
+            | MetadataSearch | arn:aws:s3:::* | | does not exist | | cross account |
+            | GetObject | arn:aws:s3:::* | | exists | | |
+            | GetObject | arn:aws:s3:::* | | exists | | cross account |
+            | MetadataSearch | arn:aws:s3:::ar-md-bucket1 | ar-md-bucket1 | does not exist | | |
+            | MetadataSearch | arn:aws:s3:::ar-md-bucket2 | ar-md-bucket2 | does not exist | | cross account |
+            | GetObject | arn:aws:s3:::ar-go-bucket1/* | ar-go-bucket1 | exists | | |
+            | GetObject | arn:aws:s3:::ar-go-bucket2/* | ar-go-bucket2 | exists | | cross account |
+            | GetObject | arn:aws:s3:::ar-go-bucket3/go-object | ar-go-bucket3 | exists | go-object | |
+            | GetObject | arn:aws:s3:::ar-go-bucket4/go-object | ar-go-bucket4 | exists | go-object | cross account |
 
     @2.6.0
     @PreMerge
@@ -51,28 +51,28 @@ Feature: IAM Policies for Assume Role Session Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
        And a "<ifCrossAccount>" AssumeRole user
-        And an IAM policy attached to the entity "role" with "<effect>" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "role" with "<effect>" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should receive "AccessDenied" error
 
         Examples:
-            | action | effect | resource | bucketName | objectExists | objectName | ifCrossAccount |
-            | MetadataSearch | Allow | ar-md-bucket3-1 | ar-md-bucket3 | does not exist | | |
-            | MetadataSearch | Allow | ar-md-bucket4-1 | ar-md-bucket4 | does not exist | | cross account |
-            | MetadataSearch | Deny | * | | does not exist | | |
-            | MetadataSearch | Deny | * | | does not exist | | cross account |
-            | MetadataSearch | Deny | ar-md-bucket5 | ar-md-bucket5 | does not exist | | |
-            | MetadataSearch | Deny | ar-md-bucket6 | ar-md-bucket6 | does not exist | | cross account |
-            | GetObject | Allow | ar-go-bucket5-1/* | ar-go-bucket5 | exists | | |
-            | GetObject | Allow | ar-go-bucket6-1/* | ar-go-bucket6 | exists | | cross account |
-            | GetObject | Allow | ar-go-bucket7/go-object1 | ar-go-bucket7 | exists | go-object | |
-            | GetObject | Allow | ar-go-bucket8/go-object1 | ar-go-bucket8 | exists | go-object | cross account |
-            | GetObject | Deny | * | ar-go-bucket9 | exists | | |
-            | GetObject | Deny | * | ar-go-bucket10 | exists | | cross account |
-            | GetObject | Deny | ar-go-bucket11/* | ar-go-bucket11 | exists | | |
-            | GetObject | Deny | ar-go-bucket12/* | ar-go-bucket12 | exists | | cross account |
-            | GetObject | Deny | ar-go-bucket13/go-object | ar-go-bucket13 | exists | go-object | |
-            | GetObject | Deny | ar-go-bucket14/go-object | ar-go-bucket14 | exists | go-object | cross account |
+            | action | effect | resource | bucketName | objectExists | objectName | ifCrossAccount |
+            | MetadataSearch | Allow | arn:aws:s3:::ar-md-bucket3-1 | ar-md-bucket3 | does not exist | | |
+            | MetadataSearch | Allow | arn:aws:s3:::ar-md-bucket4-1 | ar-md-bucket4 | does not exist | | cross account |
+            | MetadataSearch | Deny | arn:aws:s3:::* | | does not exist | | |
+            | MetadataSearch | Deny | arn:aws:s3:::* | | does not exist | | cross account |
+            | MetadataSearch | Deny | arn:aws:s3:::ar-md-bucket5 | ar-md-bucket5 | does not exist | | |
+            | MetadataSearch | Deny | arn:aws:s3:::ar-md-bucket6 | ar-md-bucket6 | does not exist | | cross account |
+            | GetObject | Allow | arn:aws:s3:::ar-go-bucket5-1/* | ar-go-bucket5 | exists | | |
+            | GetObject | Allow | arn:aws:s3:::ar-go-bucket6-1/* | ar-go-bucket6 | exists | | cross account |
+            | GetObject | Allow | arn:aws:s3:::ar-go-bucket7/go-object1 | ar-go-bucket7 | exists | go-object | |
+            | GetObject | Allow | arn:aws:s3:::ar-go-bucket8/go-object1 | ar-go-bucket8 | exists | go-object | cross account |
+            | GetObject | Deny | arn:aws:s3:::* | ar-go-bucket9 | exists | | |
+            | GetObject | Deny | arn:aws:s3:::* | ar-go-bucket10 | exists | | cross account |
+            | GetObject | Deny | arn:aws:s3:::ar-go-bucket11/* | ar-go-bucket11 | exists | | |
+            | GetObject | Deny | arn:aws:s3:::ar-go-bucket12/* | ar-go-bucket12 | exists | | cross account |
+            | GetObject | Deny | arn:aws:s3:::ar-go-bucket13/go-object | ar-go-bucket13 | exists | go-object | |
+            | GetObject | Deny | arn:aws:s3:::ar-go-bucket14/go-object | ar-go-bucket14 | exists | go-object | cross account |
 
     @2.6.0
     @PreMerge
@@ -81,19 +81,19 @@ Feature: IAM Policies for Assume Role Session Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
         And a "<ifCrossAccount>" AssumeRole user
-        And an IAM policy attached to the entity "role" with "Allow" effect to perform "<action>" on "<resource>"
-        And an IAM policy attached to the entity "role" with "Deny" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "role" with "Allow" effect to perform "s3" "<action>" on "<resource>"
+        And an IAM policy attached to the entity "role" with "Deny" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should receive "AccessDenied" error
 
         Examples:
-            | action | resource | bucketName | objectExists | objectName | ifCrossAccount |
-            | MetadataSearch | * | ar-md-bucket7 | does not exist | | |
-            | MetadataSearch | * | ar-md-bucket8 | does not exist | | cross account |
-            | MetadataSearch | ar-md-bucket9 | ar-md-bucket9 | does not exist | | |
-            | MetadataSearch | ar-md-bucket10 | ar-md-bucket10 | does not exist | | cross account |
-            | GetObject | * | ar-go-bucket15 | exists | | |
-            | GetObject | * | ar-go-bucket16 | exists | | cross account |
-            | GetObject | ar-go-bucket17/* | ar-go-bucket17 | exists | | |
-            | GetObject | ar-go-bucket18/* | ar-go-bucket18 | exists | | cross account |
-            | GetObject | ar-go-bucket19/go-object | ar-go-bucket19 | exists | go-object | |
-            | GetObject | ar-go-bucket20/go-object | ar-go-bucket20 | exists | go-object | cross account |
+            | action | resource | bucketName | objectExists | objectName | ifCrossAccount |
+            | MetadataSearch | arn:aws:s3:::* | ar-md-bucket7 | does not exist | | |
+            | MetadataSearch | arn:aws:s3:::* | ar-md-bucket8 | does not exist | | cross account |
+            | MetadataSearch | arn:aws:s3:::ar-md-bucket9 | ar-md-bucket9 | does not exist | | |
+            | MetadataSearch | arn:aws:s3:::ar-md-bucket10 | ar-md-bucket10 | does not exist | | cross account |
+            | GetObject | arn:aws:s3:::* | ar-go-bucket15 | exists | | |
+            | GetObject | arn:aws:s3:::* | ar-go-bucket16 | exists | | cross account |
+            | GetObject | arn:aws:s3:::ar-go-bucket17/* | ar-go-bucket17 | exists | | |
+            | GetObject | arn:aws:s3:::ar-go-bucket18/* | ar-go-bucket18 | exists | | cross account |
+            | GetObject | arn:aws:s3:::ar-go-bucket19/go-object | ar-go-bucket19 | exists | go-object | |
+            | GetObject | arn:aws:s3:::ar-go-bucket20/go-object | ar-go-bucket20 | exists | go-object | cross account |
diff --git a/tests/ctst/features/iam-policies/IAMUser.feature b/tests/ctst/features/iam-policies/IAMUser.feature
index 0c7452175..9f2aaaf1c 100644
--- a/tests/ctst/features/iam-policies/IAMUser.feature
+++ b/tests/ctst/features/iam-policies/IAMUser.feature
@@ -24,17 +24,17 @@ Feature: IAM Policies for IAM Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
         And a IAM_USER type
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should be able to perform successfully the "<action>" action
 
         Examples:
-            | action | resource | bucketName | objectExists | objectName |
-            | MetadataSearch | * | | does not exist | |
-            | GetObject | * | | exists | |
-            | MetadataSearch | iu-md-bucket1 | iu-md-bucket1 | does not exist | |
-            | GetObject | iu-go-bucket1/* | iu-go-bucket1 | exists | |
-            | GetObject | iu-go-bucket2/go-object | iu-go-bucket2 | exists | go-object |
+            | action | resource | bucketName | objectExists | objectName |
+            | MetadataSearch | arn:aws:s3:::* | | does not exist | |
+            | GetObject | arn:aws:s3:::* | | exists | |
+            | MetadataSearch | arn:aws:s3:::iu-md-bucket1 | iu-md-bucket1 | does not exist | |
+            | GetObject | arn:aws:s3:::iu-go-bucket1/* | iu-go-bucket1 | exists | |
+            | GetObject | arn:aws:s3:::iu-go-bucket2/go-object | iu-go-bucket2 | exists | go-object |
 
     @2.6.0
     @PreMerge
@@ -43,20 +43,20 @@ Feature: IAM Policies for IAM Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
         And a IAM_USER type
-        And an IAM policy attached to the entity "user" with "<effect>" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "user" with "<effect>" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should receive "AccessDenied" error
 
         Examples:
-            | action | effect | resource | bucketName | objectExists | objectName |
-            | MetadataSearch | Allow | iu-md-bucket3-1 | iu-md-bucket3 | does not exist | |
-            | MetadataSearch | Deny | * | | does not exist | |
-            | MetadataSearch | Deny | iu-md-bucket4 | iu-md-bucket4 | does not exist | |
-            | GetObject | Allow | iu-go-bucket3-1/* | iu-go-bucket3 | exists | |
-            | GetObject | Allow | iu-go-bucket4/go-object1 | iu-go-bucket4 | exists | go-object |
-            | GetObject | Deny | * | iu-go-bucket5 | exists | |
-            | GetObject | Deny | iu-go-bucket6/* | iu-go-bucket6 | exists | |
-            | GetObject | Deny | iu-go-bucket7/go-object | iu-go-bucket7 | exists | go-object |
+            | action | effect | resource | bucketName | objectExists | objectName |
+            | MetadataSearch | Allow | arn:aws:s3:::iu-md-bucket3-1 | iu-md-bucket3 | does not exist | |
+            | MetadataSearch | Deny | arn:aws:s3:::* | | does not exist | |
+            | MetadataSearch | Deny | arn:aws:s3:::iu-md-bucket4 | iu-md-bucket4 | does not exist | |
+            | GetObject | Allow | arn:aws:s3:::iu-go-bucket3-1/* | iu-go-bucket3 | exists | |
+            | GetObject | Allow | arn:aws:s3:::iu-go-bucket4/go-object1 | iu-go-bucket4 | exists | go-object |
+            | GetObject | Deny | arn:aws:s3:::* | iu-go-bucket5 | exists | |
+            | GetObject | Deny | arn:aws:s3:::iu-go-bucket6/* | iu-go-bucket6 | exists | |
+            | GetObject | Deny | arn:aws:s3:::iu-go-bucket7/go-object | iu-go-bucket7 | exists | go-object |
 
     @2.6.0
     @PreMerge
@@ -65,14 +65,14 @@ Feature: IAM Policies for IAM Users
         Given an existing bucket "<bucketName>" "without" versioning, "without" ObjectLock "" retention mode
         And an object "<objectName>" that "<objectExists>"
         And a IAM_USER type
-        And an IAM policy attached to the entity "user" with "Allow" effect to perform "<action>" on "<resource>"
-        And an IAM policy attached to the entity "user" with "Deny" effect to perform "<action>" on "<resource>"
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "s3" "<action>" on "<resource>"
+        And an IAM policy attached to the entity "user" with "Deny" effect to perform "s3" "<action>" on "<resource>"
         When the user tries to perform "<action>" on the bucket
         Then the user should receive "AccessDenied" error
 
         Examples:
-            | action | resource | bucketName | objectExists | objectName |
-            | MetadataSearch | * | iu-md-bucket5 | does not exist | |
-            | MetadataSearch | iu-md-bucket6 | iu-md-bucket6 | does not exist | |
-            | GetObject | * | iu-go-bucket8 | exists | |
-            | GetObject | iu-go-bucket9/* | iu-go-bucket9 | exists | |
-            | GetObject | iu-go-bucket10/go-object | iu-go-bucket10 | exists | go-object |
+            | action | resource | bucketName | objectExists | objectName |
+            | MetadataSearch | arn:aws:s3:::* | iu-md-bucket5 | does not exist | |
+            | MetadataSearch | arn:aws:s3:::iu-md-bucket6 | iu-md-bucket6 | does not exist | |
+            | GetObject | arn:aws:s3:::* | iu-go-bucket8 | exists | |
+            | GetObject | arn:aws:s3:::iu-go-bucket9/* | iu-go-bucket9 | exists | |
+            | GetObject | arn:aws:s3:::iu-go-bucket10/go-object | iu-go-bucket10 | exists | go-object |
diff --git a/tests/ctst/features/quotas/Quotas.feature b/tests/ctst/features/quotas/Quotas.feature
index 4ba5e3f40..18ec0f363 100644
--- a/tests/ctst/features/quotas/Quotas.feature
+++ b/tests/ctst/features/quotas/Quotas.feature
@@ -8,7 +8,8 @@ Feature: Quota Management for APIs
     @CronJob
     @DataWrite
     Scenario Outline: Quotas are evaluated during write operations
-        Given an action "<action>"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "<action>"
         And an upload size of <uploadSize> B for the object "<objectName>"
         And a STORAGE_MANAGER type
         And a bucket quota set to <bucketQuota> B
@@ -69,7 +70,8 @@
     @DataDeletion
     @NonVersioned
     Scenario Outline: Quotas are affected by deletion operations
-        Given an action "DeleteObject"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "DeleteObject"
         And a permission to perform the "PutObject" action
         And a STORAGE_MANAGER type
         And a bucket quota set to <bucketQuota> B
@@ -102,7 +104,8 @@
     @DataDeletion
     @NonVersioned
     Scenario Outline: Quotas are affected by deletion operations between count items runs
-        Given an action "DeleteObject"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "DeleteObject"
         And a permission to perform the "PutObject" action
         And a STORAGE_MANAGER type
         And a bucket quota set to 1000 B
@@ -139,7 +142,8 @@
     @DataDeletion
     @NonVersioned
     Scenario Outline: Negative inflights do not allow to bypass the quota
-        Given an action "DeleteObject"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "DeleteObject"
        And a permission to perform the "PutObject" action
        And a STORAGE_MANAGER type
        And a bucket quota set to <bucketQuota> B
@@ -174,7 +178,8 @@
     @Dmf
     @ColdStorage
     Scenario Outline: Object restoration implements strict quotas
-        Given an action "<action>"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "<action>"
         And a STORAGE_MANAGER type
         And a transition workflow to "e2e-cold" location
         And an upload size of <uploadSize> B for the object "<objectName>"
@@ -208,7 +213,8 @@
     @ColdStorage
     @Only
     Scenario Outline: Restored object expiration updates quotas
-        Given an action "<action>"
+        Given the environment is set up with bucket created with data, and count-items created the metrics
+        And an action "<action>"
         And a STORAGE_MANAGER type
         And a transition workflow to "e2e-cold" location
         And an upload size of <uploadSize> B for the object "obj-1"
diff --git a/tests/ctst/features/utilization/UtilizationAPIs.feature b/tests/ctst/features/utilization/UtilizationAPIs.feature
new file mode 100644
index 000000000..3f947d653
--- /dev/null
+++ b/tests/ctst/features/utilization/UtilizationAPIs.feature
@@ -0,0 +1,69 @@
+Feature: Scality Utilization Reporting (SUR) API
+    The utilization metrics API allows authorized users to retrieve
+    metrics for buckets, accounts, and locations.
+
+    @2.11.0
+    @PreMerge
+    @Utilization
+    @UtilizationAPI
+    Scenario Outline: Default roles can retrieve utilization metrics
+        Given the environment is set up with bucket created, test data uploaded, and count-items ran
+        And a STORAGE_MANAGER type
+        When the user retrieves utilization metrics using scubaclient for metric type "<metricType>"
+        Then the latest utilization metrics are retrieved
+
+        Examples:
+            | metricType |
+            | bucket |
+            | account |
+            | location |
+
+    @2.11.0
+    @PreMerge
+    @Utilization
+    @UtilizationAPI
+    Scenario Outline: IAM users with correct permissions can retrieve utilization metrics
+        Given the environment is set up with bucket created, test data uploaded, and count-items ran
+        And a IAM_USER type
+        And an IAM policy attached to the entity "user" with "Allow" effect to perform "sur" "GetMetrics" on "arn:scality:sur:::*"
+        When the user retrieves utilization metrics using scubaclient for metric type "<metricType>"
+        Then the latest utilization metrics are retrieved
+
+        Examples:
+            | metricType |
+            | bucket |
+            | account |
+            | location |
+
+    @2.11.0
+    @PreMerge
+    @Utilization
+    @UtilizationAPI
+    Scenario Outline: Unauthorized users cannot retrieve utilization metrics
+        Given the environment is set up with bucket created, test data uploaded, and count-items ran
+        And a IAM_USER type
+        When the user retrieves utilization metrics using scubaclient for metric type "<metricType>"
+        Then the user should receive "403" error
+
+        Examples:
+            | metricType |
+            | bucket |
+            | account |
+            | location |
+
+    @2.11.0
+    @PreMerge
+    @Utilization
+    @UtilizationAPI
+    Scenario Outline: IAM users with explicit deny policy cannot retrieve utilization metrics
+        Given the environment is set up with bucket created, test data uploaded, and count-items ran
+        And a IAM_USER type
+        And an IAM policy attached to the entity "user" with "Deny" effect to perform "sur" "GetMetrics" on "arn:scality:sur:::*"
+        When the user retrieves utilization metrics using scubaclient for metric type "<metricType>"
+        Then the user should receive "403" error
+
+        Examples:
+            | metricType |
+            | bucket |
+            | account |
+            | location |
diff --git a/tests/ctst/package.json b/tests/ctst/package.json
index 7f942831c..d6a284c4d 100644
--- a/tests/ctst/package.json
+++ b/tests/ctst/package.json
@@ -17,7 +17,8 @@
     "node-gyp": "^10.2.0",
     "prometheus-query": "^3.4.0",
     "proper-lockfile": "^4.1.2",
-    "qs": "^6.13.0"
+    "qs": "^6.13.0",
+    "scubaclient": "git+https://github.com/scality/scubaclient#^1.1.1"
   },
   "devDependencies": {
     "@aws-sdk/client-iam": "^3.582.0",
diff --git a/tests/ctst/steps/iam-policies/IAMUser.ts b/tests/ctst/steps/iam-policies/IAMUser.ts
index 5aad382a1..037bb53c8 100644
--- a/tests/ctst/steps/iam-policies/IAMUser.ts
+++ b/tests/ctst/steps/iam-policies/IAMUser.ts
@@ -3,8 +3,8 @@ import { Constants, IAM, Identity, Utils } from 'cli-testing';
 import { extractPropertyFromResults } from '../../common/utils';
 import Zenko from 'world/Zenko';
 
-Given('an IAM policy attached to the entity {string} with {string} effect to perform {string} on {string}',
-    async function (this: Zenko, entity: string, effect: string, action: string, resource: string) {
+Given('an IAM policy attached to the entity {string} with {string} effect to perform {string} {string} on {string}',
+    async function (this: Zenko, entity: string, effect: string, service: string, action: string, resource: string) {
         Identity.resetIdentity();
         this.resetCommand();
         this.addToSaved('action', action);
@@ -16,8 +16,8 @@ Given('an IAM policy attached to the entity {string} with {string} effect to per
             Statement: [
                 {
                     Effect: effect === 'Allow' ? 'Allow' : 'Deny',
-                    Action: `s3:${action}`,
-                    Resource: `arn:aws:s3:::${resource}`,
+                    Action: `${service}:${action}`,
+                    Resource: resource,
                 },
             ],
         }),
diff --git a/tests/ctst/steps/quotas/quotas.ts b/tests/ctst/steps/quotas/quotas.ts
index 850119f90..1862b4576 100644
--- a/tests/ctst/steps/quotas/quotas.ts
+++ b/tests/ctst/steps/quotas/quotas.ts
@@ -1,11 +1,7 @@
-import fs from 'fs';
-import lockFile from 'proper-lockfile';
 import { Given, When, ITestCaseHookParameter } from '@cucumber/cucumber';
 import Zenko, { EntityType } from '../../world/Zenko';
-import { Scality, Command, Utils, AWSCredentials, Constants, Identity, IdentityEnum } from 'cli-testing';
-import { createJobAndWaitForCompletion } from '../utils/kubernetes';
-import { createBucketWithConfiguration, putObject } from '../utils/utils';
-import { hashStringAndKeepFirst20Characters } from 'common/utils';
+import { Scality, Command, Utils } from 'cli-testing';
+import { prepareMetricsScenarios } from '../../common/utils';
 import assert from 'assert';
 
 export async function prepareQuotaScenarios(world: Zenko, scenarioConfiguration: ITestCaseHookParameter) {
@@ -17,77 +13,15 @@ export async function prepareQuotaScenarios(world: Zenko, scenarioConfiguration:
      *
      * The hook is called in the hooks.ts file.
      */
-    const { gherkinDocument, pickle } = scenarioConfiguration;
-    let initiated = false;
-    let releaseLock: (() => Promise<void>) | false = false;
-    const output: Record<string, AWSCredentials> = {};
-
-    const featureName = gherkinDocument.feature?.name?.replace(/ /g, '-').toLowerCase() || 'quotas';
-    const filePath = `/tmp/${featureName}`;
-
-    if (!fs.existsSync(filePath)) {
-        fs.writeFileSync(filePath, JSON.stringify({
-            ready: false,
-        }));
-    } else {
-        initiated = true;
-    }
-
-    if (!initiated) {
-        try {
-            releaseLock = await lockFile.lock(filePath, { stale: Constants.DEFAULT_TIMEOUT / 2 });
-        } catch (err) {
-            world.logger.error('Unable to acquire lock', { err });
-            releaseLock = false;
-        }
-    }
-
-    if (releaseLock) {
-        const isBucketNonVersioned = gherkinDocument.feature?.tags?.find(
-            tag => tag.name === 'NonVersioned') === undefined;
-        for (const scenario of gherkinDocument.feature?.children || []) {
-            for (const example of scenario.scenario?.examples || []) {
-                for (const values of example.tableBody || []) {
-                    const scenarioWithExampleID = hashStringAndKeepFirst20Characters(`${values.id}`);
-                    await world.createAccount(scenarioWithExampleID, true);
-                    await createBucketWithConfiguration(world, scenarioWithExampleID,
-                        isBucketNonVersioned ? '' : 'with');
-                    await putObject(world);
-                    output[scenarioWithExampleID] = Identity.getCurrentCredentials()!;
-                }
-            }
-        }
-
-        await createJobAndWaitForCompletion(world, 'end2end-ops-count-items', 'quotas-setup');
-        // This 2s sleep ensures that the cloudserver instances detected
-        // the metrics successfully, which enables the quotas.
-        await Utils.sleep(2000);
-        fs.writeFileSync(filePath, JSON.stringify({
-            ready: true,
-            ...output,
-        }));
-
-        await releaseLock();
-    } else {
-        while (!fs.existsSync(filePath)) {
-            await Utils.sleep(100);
-        }
-
-        let configuration: { ready: boolean } = JSON.parse(fs.readFileSync(filePath, 'utf8')) as { ready: boolean };
-        while (!configuration.ready) {
-            await Utils.sleep(100);
-            configuration = JSON.parse(fs.readFileSync(filePath, 'utf8')) as { ready: boolean };
-        }
-    }
-
-    const configuration: typeof output = JSON.parse(fs.readFileSync(`/tmp/${featureName}`, 'utf8')) as typeof output;
-    const key = hashStringAndKeepFirst20Characters(`${pickle.astNodeIds[1]}`);
-    world.logger.debug('Scenario key', { key, from: `${pickle.astNodeIds[1]}`, configuration });
-    // Save the bucket name for the scenario
-    world.addToSaved('bucketName', key);
-    world.addToSaved('accountName', key);
-    // Save the account name for the scenario
-    Identity.addIdentity(IdentityEnum.ACCOUNT, key, configuration[key], undefined, true, true);
+    const isBucketNonVersioned = scenarioConfiguration.gherkinDocument.feature?.tags?.find(
+        tag => tag.name === 'NonVersioned') === undefined;
+
+    const versioning = isBucketNonVersioned ? '' : 'with';
+
+    await prepareMetricsScenarios(world, scenarioConfiguration, {
+        versioning,
+        jobNamespace: 'quotas-setup',
+    });
 }
 
 export async function teardownQuotaScenarios(world: Zenko) {
diff --git a/tests/ctst/steps/setup.ts b/tests/ctst/steps/setup.ts
new file mode 100644
index 000000000..5c4fbfc05
--- /dev/null
+++ b/tests/ctst/steps/setup.ts
@@ -0,0 +1,12 @@
+import { Given } from '@cucumber/cucumber';
+import assert from 'assert';
+import Zenko from 'world/Zenko';
+
+// This step is a generic placeholder to check that the environment is set up,
+// while letting each feature phrase the step in its own words.
+// The associated setup logic is called in the tests/ctst/common/hooks.ts file
+// for the matching feature tags.
+Given(/^.*environment is set up.*$/, function (this: Zenko) {
+    const isSetup = this.getSaved('metricsEnvironmentSetup');
+    assert.strictEqual(isSetup, true, 'Metrics environment should be set up by the Before hook');
+});
diff --git a/tests/ctst/steps/utilization/utilizationAPI.ts b/tests/ctst/steps/utilization/utilizationAPI.ts
new file mode 100644
index 000000000..f76cef9a7
--- /dev/null
+++ b/tests/ctst/steps/utilization/utilizationAPI.ts
@@ -0,0 +1,97 @@
+import { When, Then, ITestCaseHookParameter } from '@cucumber/cucumber';
+import { strict as assert } from 'assert';
+import Zenko from '../../world/Zenko';
+import { Command } from 'cli-testing';
+import { Identity } from 'cli-testing';
+import ScubaClient, { ScubaMetrics } from 'scubaclient';
+import { prepareMetricsScenarios } from '../../common/utils';
+
+export async function prepareUtilizationScenarios(world: Zenko, scenarioConfiguration: ITestCaseHookParameter) {
+    await prepareMetricsScenarios(world, scenarioConfiguration, {
+        versioning: '',
+        jobNamespace: 'utilization-setup',
+        jobName: 'end2end-ops-count-items',
+    });
+}
+
+When('the user retrieves utilization metrics using scubaclient for metric type {string}',
+    async function (this: Zenko, metricType: string) {
+        const accountName = this.getSaved<string>('accountName');
+
+        const userCredentials = Identity.getCurrentCredentials();
+
+        if (!userCredentials) {
+            throw new Error('User credentials not found');
+        }
+
+        this.addToSaved('metricType', metricType);
+
+        const client = new ScubaClient({
+            port: parseInt(this.parameters.UtilizationServicePort),
+            host: this.parameters.UtilizationServiceHost,
+            useHttps: false,
+            auth: {
+                awsV4: {
+                    credentials: {
+                        accessKeyId: userCredentials.accessKeyId,
+                        secretAccessKey: userCredentials.secretAccessKey,
+                        sessionToken: userCredentials.sessionToken,
+                    },
+                    region: 'us-east-1',
+                    service: 's3',
+                },
+            },
+        });
+
+        let metricName;
+
+        switch (metricType) {
+        case 'bucket':
+            metricName = this.getSaved<string>('bucketName');
+            break;
+        case 'account':
+            metricName = accountName;
+            break;
+        case 'location':
+            metricName = this.getSaved<string>('locationName') || 'us-east-1';
+            break;
+        default:
+            throw new Error(`Unsupported metric type: ${metricType}`);
+        }
+
+        try {
+            const response = await client.getLatestMetrics(metricType, metricName);
+            const command: Command = {
+                err: '',
+                stdout: JSON.stringify(response),
+                stderr: '',
+            };
+            this.setResult(command);
+        } catch (err: any) { // eslint-disable-line @typescript-eslint/no-explicit-any
+            this.logger.debug('Error retrieving utilization metrics', {
+                err: err.message,
+            });
+            this.setResult({
+                err: err.message,
+                stdout: '',
+                stderr: err.message,
+            });
+        }
+    });
+
+Then('the latest utilization metrics are retrieved',
+    function (this: Zenko) {
+        const result = this.getResult();
+        assert.strictEqual(result.err, '', `Expected no error but got: ${result.err}`);
+
+        this.logger.debug('Utilization metrics', {
+            stdout: result.stdout,
+            stderr: result.stderr,
+            err: result.err,
+        });
+
+        const response = JSON.parse(result.stdout) as ScubaMetrics;
+        assert.ok(response.objectsTotal >= 0, 'Metrics should contain objectsTotal');
+        assert.ok(response.bytesTotal >= 0, 'Metrics should contain bytesTotal');
+        assert.ok(response.metricsClass === this.getSaved<string>('metricType'), 'Metric type should match');
+    });
diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index 9f22361eb..ef94be2d6 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -87,6 +87,8 @@ export interface ZenkoWorldParameters extends ClientOptions {
     BackbeatApiPort: string;
     KafkaCleanerInterval: string;
     SorbetdRestoreTimeout: string;
+    UtilizationServiceHost: string;
+    UtilizationServicePort: string;
     [key: string]: unknown;
 }
 
diff --git a/tests/ctst/yarn.lock b/tests/ctst/yarn.lock
index f8f4a38ab..954f78047 100644
--- a/tests/ctst/yarn.lock
+++ b/tests/ctst/yarn.lock
@@ -102,7 +102,7 @@
     "@aws-sdk/types" "^3.222.0"
     tslib "^1.11.1"
 
-"@aws-crypto/sha256-js@5.2.0":
+"@aws-crypto/sha256-js@5.2.0", "@aws-crypto/sha256-js@^5.2.0":
   version "5.2.0"
   resolved "https://registry.yarnpkg.com/@aws-crypto/sha256-js/-/sha256-js-5.2.0.tgz#c4fdb773fdbed9a664fc1a95724e206cf3860042"
   integrity sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==
@@ -3403,6 +3403,19 @@
     "@smithy/util-utf8" "^2.0.2"
     tslib "^2.5.0"
 
+"@smithy/signature-v4@^2.1.1":
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-2.3.0.tgz#c30dd4028ae50c607db99459981cce8cdab7a3fd"
+  integrity sha512-ui/NlpILU+6HAQBfJX8BBsDXuKSNrjTSuOYArRblcrErwKFutjrCNb/OExfVRyj9+26F9J+ZmfWT+fKWuDrH3Q==
+  dependencies:
+    "@smithy/is-array-buffer" "^2.2.0"
+    "@smithy/types" "^2.12.0"
+    "@smithy/util-hex-encoding" "^2.2.0"
+    "@smithy/util-middleware" "^2.2.0"
+    "@smithy/util-uri-escape" "^2.2.0"
+    "@smithy/util-utf8" "^2.3.0"
+    tslib "^2.6.2"
+
 "@smithy/signature-v4@^3.0.0":
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-3.0.0.tgz#f536d0abebfeeca8e9aab846a4042658ca07d3b7"
@@ -4368,6 +4381,15 @@ axios@^1.6.0:
     form-data "^4.0.0"
     proxy-from-env "^1.1.0"
 
+axios@^1.7.4:
+  version "1.8.4"
+  resolved "https://registry.yarnpkg.com/axios/-/axios-1.8.4.tgz#78990bb4bc63d2cae072952d374835950a82f447"
+  integrity sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==
+  dependencies:
+    follow-redirects "^1.15.6"
+    form-data "^4.0.0"
+    proxy-from-env "^1.1.0"
+
 balanced-match@^1.0.0:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
@@ -6581,6 +6603,14 @@ sax@>=0.6.0:
   resolved "https://registry.yarnpkg.com/sax/-/sax-1.4.1.tgz#44cc8988377f126304d3b3fc1010c733b929ef0f"
   integrity sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==
 
+"scubaclient@git+https://github.com/scality/scubaclient#^1.1.1":
+  version "1.1.1"
+  resolved "git+https://github.com/scality/scubaclient#c3a56f98768feac145b764402491967ee711063a"
+  dependencies:
+    "@aws-crypto/sha256-js" "^5.2.0"
+    "@smithy/signature-v4" "^2.1.1"
+    axios "^1.7.4"
+
 seed-random@~2.2.0:
   version "2.2.0"
   resolved "https://registry.yarnpkg.com/seed-random/-/seed-random-2.2.0.tgz#2a9b19e250a817099231a5b99a4daf80b7fbed54"
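Note: both prepareQuotaScenarios and prepareUtilizationScenarios are invoked from tests/ctst/common/hooks.ts (the file referenced by the comments in quotas.ts and setup.ts), which is not part of this diff. A minimal sketch of what that tag-matched Before-hook wiring could look like — the @Quotas tag name and the import paths are assumptions, not confirmed by this change:

import { Before, ITestCaseHookParameter } from '@cucumber/cucumber';
import Zenko from '../world/Zenko';
import { prepareQuotaScenarios } from '../steps/quotas/quotas';
import { prepareUtilizationScenarios } from '../steps/utilization/utilizationAPI';

// Hypothetical hooks.ts wiring: the hook fires before every scenario of the
// tagged feature, but prepareMetricsScenarios only performs the expensive
// bootstrap (accounts, buckets, count-items job) once per feature, guarded by
// the /tmp/ lock file; subsequent scenarios just read the cached credentials.
Before({ tags: '@Quotas' }, async function (this: Zenko, testCase: ITestCaseHookParameter) {
    await prepareQuotaScenarios(this, testCase);
});

Before({ tags: '@Utilization' }, async function (this: Zenko, testCase: ITestCaseHookParameter) {
    await prepareUtilizationScenarios(this, testCase);
});

Each step definition above then only needs the generic "environment is set up" assertion from setup.ts, since the per-scenario account and bucket names were already saved by the hook.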