diff --git a/localstack/aws/api/s3control/__init__.py b/localstack/aws/api/s3control/__init__.py
index 3728bbb4e64d6..6e6c876feb774 100644
--- a/localstack/aws/api/s3control/__init__.py
+++ b/localstack/aws/api/s3control/__init__.py
@@ -92,6 +92,8 @@
TagValueString = str
TrafficDialPercentage = int
VpcId = str
+HostId = str
+URI = str
class AsyncOperationName(str):
@@ -430,6 +432,7 @@ class NoSuchPublicAccessBlockConfiguration(ServiceException):
code: str = "NoSuchPublicAccessBlockConfiguration"
sender_fault: bool = False
status_code: int = 404
+ AccountId: Optional[AccountId]
class NotFoundException(ServiceException):
@@ -450,6 +453,20 @@ class TooManyTagsException(ServiceException):
status_code: int = 400
+class NoSuchAccessPoint(ServiceException):
+ code: str = "NoSuchAccessPoint"
+ sender_fault: bool = False
+ status_code: int = 404
+ AccessPointName: Optional[AccessPointName]
+
+
+class InvalidURI(ServiceException):
+ code: str = "InvalidURI"
+ sender_fault: bool = False
+ status_code: int = 400
+ URI: Optional[URI]
+
+
class AbortIncompleteMultipartUpload(TypedDict, total=False):
DaysAfterInitiation: Optional[DaysAfterInitiation]
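Note: the generated exception classes above take the error message as the first positional argument and the newly added typed members as keyword arguments; the serializer patched below turns those members into extra XML tags. A minimal sketch of the call pattern (mirroring how the v2 provider raises these further down):

```python
from localstack.aws.api.s3control import NoSuchAccessPoint

def fail_lookup(name: str):
    # the typed AccessPointName member is emitted as an <AccessPointName>
    # tag inside <Error> by the S3Control serializer below
    raise NoSuchAccessPoint(
        "The specified accesspoint does not exist",
        AccessPointName=name,
    )
```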
diff --git a/localstack/aws/protocol/serializer.py b/localstack/aws/protocol/serializer.py
index 471643bd20537..0291df5dace75 100644
--- a/localstack/aws/protocol/serializer.py
+++ b/localstack/aws/protocol/serializer.py
@@ -1586,6 +1586,53 @@ def _timestamp_iso8601(value: datetime) -> str:
return value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
+class S3ControlResponseSerializer(RestXMLResponseSerializer):
+ """
+ The ``S3ControlResponseSerializer`` adds some minor logic to handle S3 Control-specific peculiarities with the
+ error response serialization.
+ """
+
+ def _serialize_error(
+ self,
+ error: ServiceException,
+ response: HttpResponse,
+ shape: StructureShape,
+ operation_model: OperationModel,
+ mime_type: str,
+ request_id: str,
+ ) -> None:
+ # Check if we need to add a namespace
+ attr = (
+ {"xmlns": operation_model.metadata.get("xmlNamespace")}
+ if "xmlNamespace" in operation_model.metadata
+ else {}
+ )
+ root = ETree.Element("ErrorResponse", attr)
+
+ error_tag = ETree.SubElement(root, "Error")
+ # the difference for S3Control is here: it adds the additional error fields as tags inside the Error tag,
+ # unlike other rest-xml services
+ self._add_error_tags(error, error_tag, mime_type)
+ request_id_element = ETree.SubElement(root, "RequestId")
+ request_id_element.text = request_id
+
+ host_id_element = ETree.SubElement(root, "HostId")
+ host_id_element.text = (
+ "9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg="
+ )
+
+ self._add_additional_error_tags(vars(error), error_tag, shape, mime_type)
+ response.set_response(self._encode_payload(self._node_to_string(root, mime_type)))
+
+ @staticmethod
+ def _timestamp_iso8601(value: datetime) -> str:
+ """
+ This is very specific to S3: S3 returns an ISO8601 timestamp, but with the milliseconds always set to 000.
+ Some SDKs are very picky about the length.
+ """
+ return value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
+
+
class SqsQueryResponseSerializer(QueryResponseSerializer):
"""
Unfortunately, SQS uses a rare interpretation of the XML protocol: It uses HTML entities within XML tag text nodes.
@@ -1761,6 +1808,7 @@ def create_serializer(service: ServiceModel) -> ResponseSerializer:
"sqs-query": SqsQueryResponseSerializer,
"sqs": SqsResponseSerializer,
"s3": S3ResponseSerializer,
+ "s3control": S3ControlResponseSerializer,
}
protocol_specific_serializers = {
"query": QueryResponseSerializer,
diff --git a/localstack/aws/spec-patches.json b/localstack/aws/spec-patches.json
index a327eebd71c77..241c97cec3c33 100644
--- a/localstack/aws/spec-patches.json
+++ b/localstack/aws/spec-patches.json
@@ -1153,5 +1153,69 @@
"exception": true
}
}
+ ],
+ "s3control/2018-08-20/service-2": [
+ {
+ "op": "add",
+ "path": "/operations/DeletePublicAccessBlock/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/shapes/HostId",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchPublicAccessBlockConfiguration/members/AccountId",
+ "value": {
+ "shape": "AccountId"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/NoSuchAccessPoint",
+ "value": {
+ "type": "structure",
+ "members": {
+ "AccessPointName": {
+ "shape": "AccessPointName"
+ }
+ },
+ "error": {
+ "httpStatusCode": 404
+ },
+ "documentation": "
The specified accesspoint does not exist
",
+ "exception": true
+ }
+ },
+ {
+ "op": "add",
+ "path": "/operations/DeleteAccessPoint/http/responseCode",
+ "value": 204
+ },
+ {
+ "op": "add",
+ "path": "/shapes/URI",
+ "value": {
+ "type": "string"
+ }
+ },
+ {
+ "op": "add",
+ "path": "/shapes/InvalidURI",
+ "value": {
+ "type": "structure",
+ "members": {
+ "URI": {
+ "shape": "URI"
+ }
+ },
+ "documentation": "Couldn't parse the specified URI.
",
+ "exception": true
+ }
+ }
]
}
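The entries above are JSON Patch (RFC 6902) "add" operations applied against the botocore service spec. A minimal sketch of the effect of one of them using the `jsonpatch` library (how LocalStack actually loads and applies its spec patches may differ):

```python
import jsonpatch

# a stripped-down stand-in for the botocore s3control spec
spec = {"operations": {"DeletePublicAccessBlock": {"http": {"method": "DELETE"}}}}
patch = [
    {"op": "add", "path": "/operations/DeletePublicAccessBlock/http/responseCode", "value": 204}
]
patched = jsonpatch.apply_patch(spec, patch)
assert patched["operations"]["DeletePublicAccessBlock"]["http"]["responseCode"] == 204
```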
diff --git a/localstack/config.py b/localstack/config.py
index 922142db7c311..e072c808db29c 100644
--- a/localstack/config.py
+++ b/localstack/config.py
@@ -423,6 +423,13 @@ def in_docker():
# whether the S3 legacy V2/ASF provider is enabled
LEGACY_V2_S3_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_S3", "") in ("v2", "legacy_v2", "asf")
+# force the native provider for tests
+if not os.environ.get("PROVIDER_OVERRIDE_S3CONTROL"):
+ os.environ["PROVIDER_OVERRIDE_S3CONTROL"] = "v2"
+
+# whether the S3 Control native provider is enabled
+NATIVE_S3_CONTROL_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_S3CONTROL", "") == "v2"
+
# Whether to report internal failures as 500 or 501 errors.
FAIL_FAST = is_env_true("FAIL_FAST")
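The change above makes the native provider the default by pre-seeding the environment variable, while a user can still opt out by exporting `PROVIDER_OVERRIDE_S3CONTROL` themselves before LocalStack starts. A nearly equivalent sketch using `setdefault` (the original guarded assignment additionally treats an empty value as unset):

```python
import os

# only set the default when the user has not explicitly chosen a provider
os.environ.setdefault("PROVIDER_OVERRIDE_S3CONTROL", "v2")
NATIVE_S3_CONTROL_PROVIDER = os.environ["PROVIDER_OVERRIDE_S3CONTROL"] == "v2"
```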
diff --git a/localstack/services/providers.py b/localstack/services/providers.py
index f37d2a7a0f52c..221332db90c79 100644
--- a/localstack/services/providers.py
+++ b/localstack/services/providers.py
@@ -262,6 +262,14 @@ def s3control():
return Service.for_provider(provider, dispatch_table_factory=MotoFallbackDispatcher)
+@aws_provider(api="s3control", name="v2")
+def s3control_v2():
+ from localstack.services.s3control.v2.provider import S3ControlProvider
+
+ provider = S3ControlProvider()
+ return Service.for_provider(provider)
+
+
@aws_provider()
def scheduler():
from localstack.services.moto import MotoFallbackDispatcher
diff --git a/localstack/services/s3control/v2/__init__.py b/localstack/services/s3control/v2/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/localstack/services/s3control/v2/models.py b/localstack/services/s3control/v2/models.py
new file mode 100644
index 0000000000000..7bf0b7978e24d
--- /dev/null
+++ b/localstack/services/s3control/v2/models.py
@@ -0,0 +1,28 @@
+from localstack.aws.api.s3control import (
+ AccessPointName,
+ Alias,
+ BucketName,
+ GetAccessPointResult,
+ PublicAccessBlockConfiguration,
+)
+from localstack.services.stores import (
+ AccountRegionBundle,
+ BaseStore,
+ CrossAccountAttribute,
+ CrossRegionAttribute,
+ LocalAttribute,
+)
+
+
+class S3ControlStore(BaseStore):
+ # buckets: dict[BucketName, S3Bucket] = CrossRegionAttribute(default=dict)
+ public_access_block: PublicAccessBlockConfiguration = CrossRegionAttribute(default=dict)
+ access_points: dict[AccessPointName, GetAccessPointResult] = LocalAttribute(
+ default=dict
+ ) # TODO: check locality
+ # TODO: check for across-region accesses
+ access_point_alias: dict[Alias, BucketName] = CrossAccountAttribute(default=dict)
+ # global_bucket_map: dict[BucketName, AccountId] = CrossAccountAttribute(default=dict)
+
+
+s3control_stores = AccountRegionBundle[S3ControlStore]("s3control", S3ControlStore)
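A short usage sketch of the store semantics declared above, under the assumption that `CrossRegionAttribute` shares one value across all regions of an account while `LocalAttribute` stays region-scoped (account id and names are made up):

```python
from localstack.services.s3control.v2.models import s3control_stores

us_east = s3control_stores["000000000000"]["us-east-1"]
eu_west = s3control_stores["000000000000"]["eu-west-1"]

# public_access_block is a CrossRegionAttribute: writes in one region are
# visible from every region of the same account
us_east.public_access_block = {"BlockPublicAcls": True}
assert eu_west.public_access_block == {"BlockPublicAcls": True}

# access_points is a LocalAttribute: it stays scoped to the region it was set in
us_east.access_points["my-ap"] = {"Name": "my-ap"}
assert "my-ap" not in eu_west.access_points
```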
diff --git a/localstack/services/s3control/v2/provider.py b/localstack/services/s3control/v2/provider.py
new file mode 100644
index 0000000000000..59c702ff7f5c4
--- /dev/null
+++ b/localstack/services/s3control/v2/provider.py
@@ -0,0 +1,274 @@
+import datetime
+import re
+from random import choices
+from string import ascii_lowercase, digits
+
+from botocore.exceptions import ClientError
+
+from localstack.aws.api import CommonServiceException, RequestContext
+from localstack.aws.api.s3control import (
+ AccessPoint,
+ AccessPointName,
+ AccountId,
+ BucketName,
+ CreateAccessPointResult,
+ GetAccessPointPolicyResult,
+ GetAccessPointPolicyStatusResult,
+ GetAccessPointResult,
+ GetPublicAccessBlockOutput,
+ InvalidURI,
+ ListAccessPointsResult,
+ MaxResults,
+ NetworkOrigin,
+ NonEmptyMaxLength1024String,
+ NoSuchAccessPoint,
+ NoSuchPublicAccessBlockConfiguration,
+ Policy,
+ PublicAccessBlockConfiguration,
+ S3ControlApi,
+ VpcConfiguration,
+)
+from localstack.aws.connect import connect_to
+from localstack.services.s3.utils import validate_dict_fields
+from localstack.services.s3control.v2.models import S3ControlStore, s3control_stores
+from localstack.utils.collections import select_from_typed_dict
+from localstack.utils.urls import localstack_host
+
+
+class MalformedXML(CommonServiceException):
+ def __init__(self, message=None):
+ if not message:
+ message = "The XML you provided was not well-formed or did not validate against our published schema"
+ super().__init__("MalformedXML", status_code=400, message=message)
+
+
+class InvalidRequest(CommonServiceException):
+ def __init__(self, message=None):
+ super().__init__("InvalidRequest", status_code=400, message=message)
+
+
+FAKE_HOST_ID = "9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg="
+PUBLIC_ACCESS_BLOCK_FIELDS = {
+ "BlockPublicAcls",
+ "BlockPublicPolicy",
+ "IgnorePublicAcls",
+ "RestrictPublicBuckets",
+}
+DEFAULT_ENDPOINTS = {
+ "dualstack": f"s3-accesspoint.dualstack..{localstack_host()}",
+ "fips": f"s3-accesspoint-fips..{localstack_host()}",
+ "fips_dualstack": f"s3-accesspoint-fips.dualstack..{localstack_host()}",
+ "ipv4": f"s3-accesspoint..{localstack_host()}",
+}
+
+ACCESS_POINT_REGEX = re.compile(r"^((?!xn--)(?!.*-s3alias$)[a-z0-9][a-z0-9-]{1,48}[a-z0-9])$")
+
+
+class S3ControlProvider(S3ControlApi):
+ """
+ Many S3 Control API methods are related to S3 Outposts (S3 in your own datacenter);
+ these are not implemented in this provider.
+ Access Point limitations:
+ - https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-restrictions-limitations.html
+ """
+
+ @staticmethod
+ def get_store(account_id: str, region_name: str) -> S3ControlStore:
+ return s3control_stores[account_id][region_name]
+
+ def put_public_access_block(
+ self,
+ context: RequestContext,
+ public_access_block_configuration: PublicAccessBlockConfiguration,
+ account_id: AccountId,
+ ) -> None:
+ # TODO: do some check between passed account_id and context.account_id, but this is IAM realm
+ # the region does not matter, everything is global
+ store = self.get_store(account_id, context.region)
+
+ if not validate_dict_fields(
+ public_access_block_configuration,
+ required_fields=set(),
+ optional_fields=PUBLIC_ACCESS_BLOCK_FIELDS,
+ ):
+ raise MalformedXML()
+
+ if not public_access_block_configuration:
+ raise InvalidRequest(
+ "Must specify at least one configuration.",
+ )
+
+ for field in PUBLIC_ACCESS_BLOCK_FIELDS:
+ if public_access_block_configuration.get(field) is None:
+ public_access_block_configuration[field] = False
+
+ store.public_access_block = public_access_block_configuration
+
+ def get_public_access_block(
+ self, context: RequestContext, account_id: AccountId
+ ) -> GetPublicAccessBlockOutput:
+ store = self.get_store(account_id, context.region)
+ if not store.public_access_block:
+ raise NoSuchPublicAccessBlockConfiguration(
+ "The public access block configuration was not found",
+ AccountId=account_id,
+ )
+
+ return GetPublicAccessBlockOutput(PublicAccessBlockConfiguration=store.public_access_block)
+
+ def delete_public_access_block(self, context: RequestContext, account_id: AccountId) -> None:
+ store = self.get_store(account_id, context.region)
+ store.public_access_block = None
+
+ def create_access_point(
+ self,
+ context: RequestContext,
+ account_id: AccountId,
+ name: AccessPointName,
+ bucket: BucketName,
+ vpc_configuration: VpcConfiguration = None,
+ public_access_block_configuration: PublicAccessBlockConfiguration = None,
+ bucket_account_id: AccountId = None,
+ ) -> CreateAccessPointResult:
+ # Access Point naming rules, see:
+ # https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-access-points.html#access-points-names
+
+ # TODO: support VpcConfiguration
+ # TODO: support PublicAccessBlockConfiguration
+ # TODO: check bucket_account_id
+
+ # TODO: access points might be scoped to a region only? test it
+ store = self.get_store(account_id, context.region)
+ if not ACCESS_POINT_REGEX.match(name):
+ if len(name) < 3 or len(name) > 50 or "_" in name or name != name.lower():
+ raise InvalidURI(
+ "Couldn't parse the specified URI.",
+ URI=f"accesspoint/{name}",
+ )
+
+ raise InvalidRequest("Your Amazon S3 AccessPoint name is invalid")
+
+ if name in store.access_points:
+ # TODO: implement additional checks if the account id is different from the access point owner's
+ raise CommonServiceException(
+ "AccessPointAlreadyOwnedByYou",
+ "Your previous request to create the named accesspoint succeeded and you already own it.",
+ status_code=409,
+ )
+
+ # TODO: what permissions are needed to create an AccessPoint for a bucket?
+ try:
+ connect_to(region_name=context.region).s3.head_bucket(Bucket=bucket)
+ except ClientError as e:
+ if e.response.get("Error", {}).get("Code") == "404":
+ raise InvalidRequest(
+ "Amazon S3 AccessPoint can only be created for existing bucket",
+ )
+ # TODO: find AccessDenied exception?
+ raise
+
+ alias = create_random_alias(name)
+
+ # if the PublicAccessBlockConfiguration is not set, every field defaults to True;
+ # otherwise, unset fields default to False
+ is_pabc_none = public_access_block_configuration is None
+ public_access_block_configuration = public_access_block_configuration or {}
+ for field in PUBLIC_ACCESS_BLOCK_FIELDS:
+ if public_access_block_configuration.get(field) is None:
+ public_access_block_configuration[field] = is_pabc_none
+
+ regional_endpoints = {
+ t: endpoint.replace("<REGION>", context.region)
+ for t, endpoint in DEFAULT_ENDPOINTS.items()
+ }
+ access_point_arn = f"arn:aws:s3:{context.region}:{account_id}:accesspoint/{name}"
+
+ access_point = GetAccessPointResult(
+ Name=name,
+ Bucket=bucket,
+ NetworkOrigin=NetworkOrigin.VPC if vpc_configuration else NetworkOrigin.Internet,
+ PublicAccessBlockConfiguration=public_access_block_configuration,
+ CreationDate=datetime.datetime.now(tz=datetime.UTC),
+ Alias=alias,
+ AccessPointArn=access_point_arn,
+ Endpoints=regional_endpoints,
+ BucketAccountId=bucket_account_id or account_id, # TODO
+ )
+ if vpc_configuration:
+ access_point["VpcConfiguration"] = vpc_configuration
+
+ store.access_points[name] = access_point
+ store.access_point_alias[alias] = bucket
+
+ return CreateAccessPointResult(
+ AccessPointArn=access_point_arn,
+ Alias=alias,
+ )
+
+ def get_access_point(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName
+ ) -> GetAccessPointResult:
+ store = self.get_store(account_id, context.region)
+ if not (access_point := store.access_points.get(name)):
+ raise NoSuchAccessPoint(
+ "The specified accesspoint does not exist",
+ AccessPointName=name,
+ )
+
+ return access_point
+
+ def list_access_points(
+ self,
+ context: RequestContext,
+ account_id: AccountId,
+ bucket: BucketName = None,
+ next_token: NonEmptyMaxLength1024String = None,
+ max_results: MaxResults = None,
+ ) -> ListAccessPointsResult:
+ # TODO: implement pagination
+ # TODO: implement filter with Bucket name
+ # TODO: implement ordering
+ store = self.get_store(account_id, context.region)
+
+ result = []
+ for full_access_point in store.access_points.values():
+ access_point: AccessPoint = select_from_typed_dict(AccessPoint, full_access_point)
+ result.append(access_point)
+
+ return ListAccessPointsResult(
+ AccessPointList=result,
+ )
+
+ def delete_access_point(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName
+ ) -> None:
+ store = self.get_store(account_id, context.region)
+ if not store.access_points.pop(name, None):
+ raise NoSuchAccessPoint(
+ "The specified accesspoint does not exist",
+ AccessPointName=name,
+ )
+
+ def put_access_point_policy(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName, policy: Policy
+ ) -> None:
+ pass
+
+ def get_access_point_policy(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName
+ ) -> GetAccessPointPolicyResult:
+ pass
+
+ def delete_access_point_policy(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName
+ ) -> None:
+ pass
+
+ def get_access_point_policy_status(
+ self, context: RequestContext, account_id: AccountId, name: AccessPointName
+ ) -> GetAccessPointPolicyStatusResult:
+ pass
+
+
+def create_random_alias(name: str) -> str:
+ return f"{name}-{''.join(choices(ascii_lowercase + digits, k=34))}-s3alias"
diff --git a/tests/aws/services/s3control/test_s3control.py b/tests/aws/services/s3control/test_s3control.py
index 1543a9a1c65a9..21c4ee478aa64 100644
--- a/tests/aws/services/s3control/test_s3control.py
+++ b/tests/aws/services/s3control/test_s3control.py
@@ -1,66 +1,365 @@
+import contextlib
+
import pytest
+from botocore.client import Config
from botocore.exceptions import ClientError
-from localstack import config
from localstack.constants import (
TEST_AWS_ACCESS_KEY_ID,
TEST_AWS_ACCOUNT_ID,
TEST_AWS_SECRET_ACCESS_KEY,
)
+from localstack.testing.aws.util import is_aws_cloud
from localstack.testing.pytest import markers
+from localstack.utils.strings import short_uid
+from localstack.utils.urls import localstack_host
-remote_endpoint = config.external_service_url(protocol="https")
+s3_control_endpoint = f"http://s3-control.{localstack_host()}"
-@pytest.fixture
-def s3control_client(aws_client_factory):
- return aws_client_factory(
- aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
- aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
- endpoint_url=remote_endpoint,
- ).s3control
-
-
-@markers.aws.unknown
-def test_lifecycle_public_access_block(s3control_client):
- with pytest.raises(ClientError) as ce:
- s3control_client.get_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
- assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
-
- access_block_config = {
- "BlockPublicAcls": True,
- "IgnorePublicAcls": True,
- "BlockPublicPolicy": True,
- "RestrictPublicBuckets": True,
- }
-
- put_response = s3control_client.put_public_access_block(
- AccountId=TEST_AWS_ACCOUNT_ID, PublicAccessBlockConfiguration=access_block_config
+@pytest.fixture(autouse=True)
+def s3control_snapshot(snapshot):
+ snapshot.add_transformers_list(
+ [
+ snapshot.transform.key_value("HostId", reference_replacement=False),
+ snapshot.transform.key_value("Name"),
+ snapshot.transform.key_value("Bucket"),
+ snapshot.transform.regex("amazonaws.com", "<host>"),
+ snapshot.transform.regex(localstack_host().host_and_port(), "<host>"),
+ snapshot.transform.regex(
+ '([a-z0-9]{34})(?=.*-s3alias")', replacement="<alias>"
+ ),
+ ]
)
- assert put_response["ResponseMetadata"]["HTTPStatusCode"] == 200
- get_response = s3control_client.get_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
- assert access_block_config == get_response["PublicAccessBlockConfiguration"]
+@pytest.fixture
+def s3control_client(aws_client_factory, aws_client):
+ """
+ The endpoint for S3 Control looks like `http(s)://<account-id>.s3-control.<region>.amazonaws.com/v20180820/configuration/...`.
+ We need to manually set it to something other than `localhost` so that it is resolvable, as boto will prefix the
+ host with the account id.
+ """
+ if not is_aws_cloud():
+ return aws_client_factory(
+ aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
+ aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
+ endpoint_url=s3_control_endpoint,
+ ).s3control
+ else:
+ return aws_client.s3control
+
+
+@pytest.fixture
+def s3control_client_no_validation(aws_client_factory):
+ if not is_aws_cloud():
+ s3control_client = aws_client_factory(
+ config=Config(parameter_validation=False),
+ aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
+ aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
+ endpoint_url=s3_control_endpoint,
+ ).s3control
+ else:
+ s3control_client = aws_client_factory(config=Config(parameter_validation=False)).s3control
+
+ return s3control_client
+
+
+@pytest.fixture
+def s3control_create_access_point(s3control_client):
+ access_points = []
+
+ def _create_access_point(**kwargs):
+ resp = s3control_client.create_access_point(**kwargs)
+ access_points.append((kwargs["Name"], kwargs["AccountId"]))
+ return resp
+
+ yield _create_access_point
+
+ for access_point_name, account_id in access_points:
+ with contextlib.suppress(ClientError):
+ s3control_client.delete_access_point(AccountId=account_id, Name=access_point_name)
+
+
+class TestLegacyS3Control:
+ @markers.aws.unknown
+ def test_lifecycle_public_access_block(self, s3control_client):
+ with pytest.raises(ClientError) as ce:
+ s3control_client.get_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
+ assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
+
+ access_block_config = {
+ "BlockPublicAcls": True,
+ "IgnorePublicAcls": True,
+ "BlockPublicPolicy": True,
+ "RestrictPublicBuckets": True,
+ }
+
+ put_response = s3control_client.put_public_access_block(
+ AccountId=TEST_AWS_ACCOUNT_ID, PublicAccessBlockConfiguration=access_block_config
+ )
+
+ assert put_response["ResponseMetadata"]["HTTPStatusCode"] == 200
+
+ get_response = s3control_client.get_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
+ assert access_block_config == get_response["PublicAccessBlockConfiguration"]
+
+ s3control_client.delete_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
+
+ @markers.aws.unknown
+ @pytest.mark.skip(reason="Moto forces IAM use with the account id even when not enabled")
+ def test_public_access_block_validations(self, s3control_client):
+ with pytest.raises(ClientError) as error:
+ s3control_client.get_public_access_block(AccountId="111111111111")
+ assert error.value.response["Error"]["Code"] == "AccessDenied"
+
+ with pytest.raises(ClientError) as error:
+ s3control_client.put_public_access_block(
+ AccountId="111111111111",
+ PublicAccessBlockConfiguration={"BlockPublicAcls": True},
+ )
+ assert error.value.response["Error"]["Code"] == "AccessDenied"
+
+ with pytest.raises(ClientError) as error:
+ s3control_client.put_public_access_block(
+ AccountId=TEST_AWS_ACCOUNT_ID, PublicAccessBlockConfiguration={}
+ )
+ assert error.value.response["Error"]["Code"] == "InvalidRequest"
- s3control_client.delete_public_access_block(AccountId=TEST_AWS_ACCOUNT_ID)
+class TestS3ControlPublicAccessBlock:
+ @markers.aws.validated
+ def test_crud_public_access_block(self, s3control_client, account_id, snapshot):
+ with pytest.raises(ClientError) as e:
+ s3control_client.get_public_access_block(AccountId=account_id)
+ snapshot.match("get-default-public-access-block", e.value.response)
+
+ put_public_access_block = s3control_client.put_public_access_block(
+ AccountId=account_id,
+ PublicAccessBlockConfiguration={
+ "BlockPublicAcls": False,
+ "IgnorePublicAcls": False,
+ "BlockPublicPolicy": False,
+ },
+ )
+ snapshot.match("put-public-access-block", put_public_access_block)
+
+ get_public_access_block = s3control_client.get_public_access_block(AccountId=account_id)
+ snapshot.match("get-public-access-block", get_public_access_block)
+
+ delete_public_access_block = s3control_client.delete_public_access_block(
+ AccountId=account_id
+ )
+ snapshot.match("delete-public-access-block", delete_public_access_block)
+
+ with pytest.raises(ClientError) as e:
+ s3control_client.get_public_access_block(AccountId=account_id)
+ snapshot.match("get-public-access-block-after-delete", e.value.response)
+
+ delete_public_access_block = s3control_client.delete_public_access_block(
+ AccountId=account_id
+ )
+ snapshot.match("idempotent-delete-public-access-block", delete_public_access_block)
-@markers.aws.unknown
-def test_public_access_block_validations(s3control_client):
- with pytest.raises(ClientError) as error:
- s3control_client.get_public_access_block(AccountId="111111111111")
- assert error.value.response["Error"]["Code"] == "AccessDenied"
+ @markers.aws.validated
+ def test_empty_public_access_block(self, s3control_client_no_validation, account_id, snapshot):
+ # we need to disable validation for this test
- with pytest.raises(ClientError) as error:
- s3control_client.put_public_access_block(
- AccountId="111111111111",
+ with pytest.raises(ClientError) as e:
+ s3control_client_no_validation.put_public_access_block(
+ AccountId=account_id,
+ PublicAccessBlockConfiguration={},
+ )
+ snapshot.match("put-public-access-block-empty", e.value.response)
+ # We also wanted to try a wrong key in the PublicAccessBlockConfiguration, but boto is unable to serialize it
+
+
+class TestS3ControlAccessPoint:
+ @markers.aws.validated
+ def test_access_point_lifecycle(
+ self, s3control_client, s3control_create_access_point, account_id, s3_bucket, snapshot
+ ):
+ snapshot.add_transformers_list(
+ [
+ snapshot.transform.key_value("Name"),
+ snapshot.transform.key_value("Bucket"),
+ snapshot.transform.regex("amazonaws.com", "<host>"),
+ snapshot.transform.regex(localstack_host().host_and_port(), "<host>"),
+ ]
+ )
+
+ list_access_points = s3control_client.list_access_points(AccountId=account_id)
+ snapshot.match("list-access-points-start", list_access_points)
+
+ ap_name = short_uid()
+ create_access_point = s3control_create_access_point(
+ AccountId=account_id, Name=ap_name, Bucket=s3_bucket
+ )
+
+ alias_random_part = create_access_point["Alias"].split("-")[1]
+ assert len(alias_random_part) == 34
+
+ snapshot.match("create-access-point", create_access_point)
+
+ get_access_point = s3control_client.get_access_point(AccountId=account_id, Name=ap_name)
+ snapshot.match("get-access-point", get_access_point)
+
+ list_access_points = s3control_client.list_access_points(AccountId=account_id)
+ snapshot.match("list-access-points-after-create", list_access_points)
+
+ delete_access_point = s3control_client.delete_access_point(
+ AccountId=account_id, Name=ap_name
+ )
+ snapshot.match("delete-access-point", delete_access_point)
+
+ list_access_points = s3control_client.list_access_points(AccountId=account_id)
+ snapshot.match("list-access-points-after-delete", list_access_points)
+
+ with pytest.raises(ClientError) as e:
+ s3control_client.get_access_point(AccountId=account_id, Name=ap_name)
+ snapshot.match("get-delete-access-point", e.value.response)
+
+ with pytest.raises(ClientError) as e:
+ s3control_client.delete_access_point(AccountId=account_id, Name=ap_name)
+ snapshot.match("delete-already-deleted-access-point", e.value.response)
+
+ @markers.aws.validated
+ def test_access_point_bucket_not_exists(
+ self, s3control_create_access_point, account_id, snapshot
+ ):
+ ap_name = short_uid()
+ with pytest.raises(ClientError) as e:
+ s3control_create_access_point(
+ AccountId=account_id,
+ Name=ap_name,
+ Bucket=f"fake-bucket-{short_uid()}-{short_uid()}",
+ )
+ snapshot.match("access-point-bucket-not-exists", e.value.response)
+
+ @markers.aws.validated
+ def test_access_point_name_validation(
+ self, s3control_client_no_validation, account_id, snapshot, s3_bucket
+ ):
+ # not using parametrization because that would create a lot of snapshots;
+ # only snapshot-validate the first one
+ wrong_name = "xn--test-alias"
+ wrong_names = [
+ "-hyphen-start",
+ "cannot-end-s3alias",
+ "cannot-have.dot",
+ ]
+
+ with pytest.raises(ClientError) as e:
+ s3control_client_no_validation.create_access_point(
+ AccountId=account_id,
+ Name=wrong_name,
+ Bucket=s3_bucket,
+ )
+ snapshot.match("access-point-wrong-naming", e.value.response)
+
+ for name in wrong_names:
+ with pytest.raises(ClientError) as e:
+ s3control_client_no_validation.create_access_point(
+ AccountId=account_id,
+ Name=name,
+ Bucket=s3_bucket,
+ )
+ assert e.match("Your Amazon S3 AccessPoint name is invalid"), (name, e.value.response)
+
+ # error is different for too short of a name
+ with pytest.raises(ClientError) as e:
+ s3control_client_no_validation.create_access_point(
+ AccountId=account_id,
+ Name="sa",
+ Bucket=s3_bucket,
+ )
+ snapshot.match("access-point-name-too-short", e.value.response)
+
+ uri_error_names = [
+ "a" * 51,
+ "WRONG-casing",
+ "cannot-have_underscore",
+ ]
+ for name in uri_error_names:
+ with pytest.raises(ClientError) as e:
+ s3control_client_no_validation.create_access_point(
+ AccountId=account_id,
+ Name="a" * 51,
+ Bucket=s3_bucket,
+ )
+ assert e.match("InvalidURI"), (name, e.value.response)
+
+ @markers.aws.validated
+ def test_access_point_already_exists(
+ self, s3control_create_access_point, s3_bucket, account_id, snapshot
+ ):
+ ap_name = short_uid()
+ s3control_create_access_point(AccountId=account_id, Name=ap_name, Bucket=s3_bucket)
+ with pytest.raises(ClientError) as e:
+ s3control_create_access_point(AccountId=account_id, Name=ap_name, Bucket=s3_bucket)
+ snapshot.match("access-point-already-exists", e.value.response)
+
+ @markers.aws.validated
+ def test_access_point_vpc_config(
+ self, s3control_create_access_point, s3control_client, account_id, snapshot, s3_bucket
+ ):
+ pass
+
+ @markers.aws.validated
+ def test_access_point_public_access_block_configuration(
+ self, s3control_client, s3control_create_access_point, account_id, snapshot, s3_bucket
+ ):
+ # set a letter in the name for ordering
+ ap_name_1 = f"a{short_uid()}"
+ response = s3control_create_access_point(
+ AccountId=account_id,
+ Name=ap_name_1,
+ Bucket=s3_bucket,
+ PublicAccessBlockConfiguration={},
+ )
+ snapshot.match("put-ap-empty-pabc", response)
+ get_ap = s3control_client.get_access_point(AccountId=account_id, Name=ap_name_1)
+ snapshot.match("get-ap-empty-pabc", get_ap)
+
+ ap_name_2 = f"b{short_uid()}"
+ response = s3control_create_access_point(
+ AccountId=account_id,
+ Name=ap_name_2,
+ Bucket=s3_bucket,
+ PublicAccessBlockConfiguration={"BlockPublicAcls": False},
+ )
+ snapshot.match("put-ap-partial-pabc", response)
+ get_ap = s3control_client.get_access_point(AccountId=account_id, Name=ap_name_2)
+ snapshot.match("get-ap-partial-pabc", get_ap)
+
+ ap_name_3 = f"c{short_uid()}"
+ response = s3control_create_access_point(
+ AccountId=account_id,
+ Name=ap_name_3,
+ Bucket=s3_bucket,
PublicAccessBlockConfiguration={"BlockPublicAcls": True},
)
- assert error.value.response["Error"]["Code"] == "AccessDenied"
+ snapshot.match("put-ap-partial-true-pabc", response)
+ get_ap = s3control_client.get_access_point(AccountId=account_id, Name=ap_name_3)
+ snapshot.match("get-ap-partial-true-pabc", get_ap)
- with pytest.raises(ClientError) as error:
- s3control_client.put_public_access_block(
- AccountId=TEST_AWS_ACCOUNT_ID, PublicAccessBlockConfiguration={}
+ ap_name_4 = f"d{short_uid()}"
+ response = s3control_create_access_point(
+ AccountId=account_id,
+ Name=ap_name_4,
+ Bucket=s3_bucket,
)
- assert error.value.response["Error"]["Code"] == "InvalidRequest"
+ snapshot.match("put-ap-pabc-not-set", response)
+ get_ap = s3control_client.get_access_point(AccountId=account_id, Name=ap_name_4)
+ snapshot.match("get-ap-pabc-not-set", get_ap)
+
+ list_access_points = s3control_client.list_access_points(AccountId=account_id)
+ snapshot.match("list-access-points", list_access_points)
+
+ @markers.aws.validated
+ def test_access_point_regions(self):
+ pass
+
+ @markers.aws.validated
+ def test_access_point_pagination(self):
+ pass
diff --git a/tests/aws/services/s3control/test_s3control.snapshot.json b/tests/aws/services/s3control/test_s3control.snapshot.json
new file mode 100644
index 0000000000000..b64b4038fc072
--- /dev/null
+++ b/tests/aws/services/s3control/test_s3control.snapshot.json
@@ -0,0 +1,414 @@
+{
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlPublicAccessBlock::test_crud_public_access_block": {
+ "recorded-date": "24-11-2023, 21:36:06",
+ "recorded-content": {
+ "get-default-public-access-block": {
+ "Error": {
+ "AccountId": "111111111111",
+ "Code": "NoSuchPublicAccessBlockConfiguration",
+ "Message": "The public access block configuration was not found"
+ },
+ "HostId": "host-id",
+ "Message": "The public access block configuration was not found",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 404
+ }
+ },
+ "put-public-access-block": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-public-access-block": {
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": false,
+ "BlockPublicPolicy": false,
+ "IgnorePublicAcls": false,
+ "RestrictPublicBuckets": false
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "delete-public-access-block": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 204
+ }
+ },
+ "get-public-access-block-after-delete": {
+ "Error": {
+ "AccountId": "111111111111",
+ "Code": "NoSuchPublicAccessBlockConfiguration",
+ "Message": "The public access block configuration was not found"
+ },
+ "HostId": "host-id",
+ "Message": "The public access block configuration was not found",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 404
+ }
+ },
+ "idempotent-delete-public-access-block": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 204
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlPublicAccessBlock::test_empty_public_access_block": {
+ "recorded-date": "24-11-2023, 22:02:08",
+ "recorded-content": {
+ "put-public-access-block-empty": {
+ "Error": {
+ "Code": "InvalidRequest",
+ "Message": "Must specify at least one configuration."
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 400
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_lifecycle": {
+ "recorded-date": "28-11-2023, 22:45:37",
+ "recorded-content": {
+ "list-access-points-start": {
+ "AccessPointList": [],
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "create-access-point": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-access-point": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "CreationDate": "datetime",
+ "Endpoints": {
+ "dualstack": "s3-accesspoint.dualstack..",
+ "fips": "s3-accesspoint-fips..",
+ "fips_dualstack": "s3-accesspoint-fips.dualstack..",
+ "ipv4": "s3-accesspoint.."
+ },
+ "Name": "",
+ "NetworkOrigin": "Internet",
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": true,
+ "BlockPublicPolicy": true,
+ "IgnorePublicAcls": true,
+ "RestrictPublicBuckets": true
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "list-access-points-after-create": {
+ "AccessPointList": [
+ {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "Name": "",
+ "NetworkOrigin": "Internet"
+ }
+ ],
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "delete-access-point": {
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 204
+ }
+ },
+ "list-access-points-after-delete": {
+ "AccessPointList": [],
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-delete-access-point": {
+ "Error": {
+ "AccessPointName": "",
+ "Code": "NoSuchAccessPoint",
+ "Message": "The specified accesspoint does not exist"
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 404
+ }
+ },
+ "delete-already-deleted-access-point": {
+ "Error": {
+ "AccessPointName": "",
+ "Code": "NoSuchAccessPoint",
+ "Message": "The specified accesspoint does not exist"
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 404
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_bucket_not_exists": {
+ "recorded-date": "28-11-2023, 21:39:20",
+ "recorded-content": {
+ "access-point-bucket-not-exists": {
+ "Error": {
+ "Code": "InvalidRequest",
+ "Message": "Amazon S3 AccessPoint can only be created for existing bucket"
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 400
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_name_validation": {
+ "recorded-date": "28-11-2023, 22:16:51",
+ "recorded-content": {
+ "access-point-wrong-naming": {
+ "Error": {
+ "Code": "InvalidRequest",
+ "Message": "Your Amazon S3 AccessPoint name is invalid"
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 400
+ }
+ },
+ "access-point-name-too-short": {
+ "Error": {
+ "Code": "InvalidURI",
+ "Message": "Couldn't parse the specified URI.",
+ "URI": "accesspoint/sa"
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 400
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_already_exists": {
+ "recorded-date": "28-11-2023, 22:31:38",
+ "recorded-content": {
+ "access-point-already-exists": {
+ "Error": {
+ "Code": "AccessPointAlreadyOwnedByYou",
+ "Message": "Your previous request to create the named accesspoint succeeded and you already own it."
+ },
+ "HostId": "host-id",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 409
+ }
+ }
+ }
+ },
+ "tests/aws/services/s3control/test_s3control.py::TestS3ControlAccessPoint::test_access_point_public_access_block_configuration": {
+ "recorded-date": "28-11-2023, 23:15:08",
+ "recorded-content": {
+ "put-ap-empty-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-ap-empty-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "CreationDate": "datetime",
+ "Endpoints": {
+ "dualstack": "s3-accesspoint.dualstack..",
+ "fips": "s3-accesspoint-fips..",
+ "fips_dualstack": "s3-accesspoint-fips.dualstack..",
+ "ipv4": "s3-accesspoint.."
+ },
+ "Name": "",
+ "NetworkOrigin": "Internet",
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": false,
+ "BlockPublicPolicy": false,
+ "IgnorePublicAcls": false,
+ "RestrictPublicBuckets": false
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "put-ap-partial-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-ap-partial-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "CreationDate": "datetime",
+ "Endpoints": {
+ "dualstack": "s3-accesspoint.dualstack..",
+ "fips": "s3-accesspoint-fips..",
+ "fips_dualstack": "s3-accesspoint-fips.dualstack..",
+ "ipv4": "s3-accesspoint.."
+ },
+ "Name": "",
+ "NetworkOrigin": "Internet",
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": false,
+ "BlockPublicPolicy": false,
+ "IgnorePublicAcls": false,
+ "RestrictPublicBuckets": false
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "put-ap-partial-true-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-ap-partial-true-pabc": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "CreationDate": "datetime",
+ "Endpoints": {
+ "dualstack": "s3-accesspoint.dualstack..",
+ "fips": "s3-accesspoint-fips..",
+ "fips_dualstack": "s3-accesspoint-fips.dualstack..",
+ "ipv4": "s3-accesspoint.."
+ },
+ "Name": "",
+ "NetworkOrigin": "Internet",
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": true,
+ "BlockPublicPolicy": false,
+ "IgnorePublicAcls": false,
+ "RestrictPublicBuckets": false
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "put-ap-pabc-not-set": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "get-ap-pabc-not-set": {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "CreationDate": "datetime",
+ "Endpoints": {
+ "dualstack": "s3-accesspoint.dualstack..",
+ "fips": "s3-accesspoint-fips..",
+ "fips_dualstack": "s3-accesspoint-fips.dualstack..",
+ "ipv4": "s3-accesspoint.."
+ },
+ "Name": "",
+ "NetworkOrigin": "Internet",
+ "PublicAccessBlockConfiguration": {
+ "BlockPublicAcls": true,
+ "BlockPublicPolicy": true,
+ "IgnorePublicAcls": true,
+ "RestrictPublicBuckets": true
+ },
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ },
+ "list-access-points": {
+ "AccessPointList": [
+ {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "Name": "",
+ "NetworkOrigin": "Internet"
+ },
+ {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "Name": "",
+ "NetworkOrigin": "Internet"
+ },
+ {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "Name": "",
+ "NetworkOrigin": "Internet"
+ },
+ {
+ "AccessPointArn": "arn:aws:s3::111111111111:accesspoint/",
+ "Alias": "--s3alias",
+ "Bucket": "",
+ "BucketAccountId": "111111111111",
+ "Name": "",
+ "NetworkOrigin": "Internet"
+ }
+ ],
+ "ResponseMetadata": {
+ "HTTPHeaders": {},
+ "HTTPStatusCode": 200
+ }
+ }
+ }
+ }
+}